diff --git a/.gitignore b/.gitignore index d1af97cbaea3b..8b2da4dc0832a 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,6 @@ testfixtures_shared/ # Generated checkstyle_ide.xml x-pack/plugin/esql/src/main/generated-src/generated/ + +# JEnv +.java-version diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index e62c26c7fbc01..3ab85ba69dc80 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -31,11 +31,15 @@ import org.gradle.api.tasks.bundling.Jar; import org.gradle.api.tasks.javadoc.Javadoc; import org.gradle.external.javadoc.CoreJavadocOptions; +import org.gradle.jvm.toolchain.JavaLanguageVersion; +import org.gradle.jvm.toolchain.JavaToolchainService; import org.gradle.language.base.plugins.LifecycleBasePlugin; import java.io.File; import java.util.Map; +import javax.inject.Inject; + import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; @@ -44,6 +48,14 @@ * common configuration for production code. */ public class ElasticsearchJavaPlugin implements Plugin { + + private final JavaToolchainService javaToolchains; + + @Inject + ElasticsearchJavaPlugin(JavaToolchainService javaToolchains) { + this.javaToolchains = javaToolchains; + } + @Override public void apply(Project project) { project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); @@ -55,7 +67,7 @@ public void apply(Project project) { // configureConfigurations(project); configureJars(project, buildParams.get()); configureJarManifest(project, buildParams.get()); - configureJavadoc(project); + configureJavadoc(project, buildParams.get()); testCompileOnlyDeps(project); } @@ -128,7 +140,7 @@ private static void configureJarManifest(Project project, BuildParameterExtensio project.getPluginManager().apply("nebula.info-jar"); } - private static void configureJavadoc(Project project) { + private void configureJavadoc(Project project, BuildParameterExtension buildParams) { project.getTasks().withType(Javadoc.class).configureEach(javadoc -> { /* * Generate docs using html5 to suppress a warning from `javadoc` @@ -136,6 +148,10 @@ private static void configureJavadoc(Project project) { */ CoreJavadocOptions javadocOptions = (CoreJavadocOptions) javadoc.getOptions(); javadocOptions.addBooleanOption("html5", true); + + javadoc.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(buildParams.getMinimumRuntimeVersion().getMajorVersion())); + })); }); TaskProvider javadoc = project.getTasks().withType(Javadoc.class).named("javadoc"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index da26cb66122ad..0e8dbb7fce26c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -17,12 +17,12 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; 
+import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.ProjectLayout; import org.gradle.api.model.ObjectFactory; import org.gradle.api.plugins.JvmToolchainsPlugin; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; -import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskProvider; import org.gradle.jvm.toolchain.JavaToolchainService; @@ -54,11 +54,17 @@ public class InternalDistributionBwcSetupPlugin implements Plugin { private final ObjectFactory objectFactory; private ProviderFactory providerFactory; private JavaToolchainService toolChainService; + private FileSystemOperations fileSystemOperations; @Inject - public InternalDistributionBwcSetupPlugin(ObjectFactory objectFactory, ProviderFactory providerFactory) { + public InternalDistributionBwcSetupPlugin( + ObjectFactory objectFactory, + ProviderFactory providerFactory, + FileSystemOperations fileSystemOperations + ) { this.objectFactory = objectFactory; this.providerFactory = providerFactory; + this.fileSystemOperations = fileSystemOperations; } @Override @@ -76,7 +82,8 @@ public void apply(Project project) { providerFactory, objectFactory, toolChainService, - isCi + isCi, + fileSystemOperations ); }); } @@ -88,7 +95,8 @@ private static void configureBwcProject( ProviderFactory providerFactory, ObjectFactory objectFactory, JavaToolchainService toolChainService, - Boolean isCi + Boolean isCi, + FileSystemOperations fileSystemOperations ) { ProjectLayout layout = project.getLayout(); Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); @@ -120,11 +128,18 @@ private static void configureBwcProject( List distributionProjects = resolveArchiveProjects(checkoutDir.get(), bwcVersion.get()); // Setup gradle user home directory - project.getTasks().register("setupGradleUserHome", Copy.class, copy -> { - copy.into(project.getGradle().getGradleUserHomeDir().getAbsolutePath() + "-" + project.getName()); - copy.from(project.getGradle().getGradleUserHomeDir().getAbsolutePath(), copySpec -> { - copySpec.include("gradle.properties"); - copySpec.include("init.d/*"); + // We don't use a normal `Copy` task here as snapshotting the entire gradle user home is very expensive. 
This task is cheap, so + // up-to-date checking doesn't buy us much + project.getTasks().register("setupGradleUserHome", task -> { + task.doLast(t -> { + fileSystemOperations.copy(copy -> { + String gradleUserHome = project.getGradle().getGradleUserHomeDir().getAbsolutePath(); + copy.into(gradleUserHome + "-" + project.getName()); + copy.from(gradleUserHome, copySpec -> { + copySpec.include("gradle.properties"); + copySpec.include("init.d/*"); + }); + }); }); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 7c488e6e73fee..5402e0a04fe8f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -86,14 +86,14 @@ public void apply(Project project) { configurePreviewFeatures(project, javaExtension.getSourceSets().getByName(SourceSet.TEST_SOURCE_SET_NAME), 21); for (int javaVersion : mainVersions) { String mainSourceSetName = SourceSet.MAIN_SOURCE_SET_NAME + javaVersion; - SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion); + SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion, true); configureSourceSetInJar(project, mainSourceSet, javaVersion); addJar(project, mainSourceSet, javaVersion); mainSourceSets.add(mainSourceSetName); testSourceSets.add(mainSourceSetName); String testSourceSetName = SourceSet.TEST_SOURCE_SET_NAME + javaVersion; - SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion); + SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion, false); testSourceSets.add(testSourceSetName); createTestTask(project, buildParams, testSourceSet, javaVersion, mainSourceSets); } @@ -121,7 +121,8 @@ private SourceSet addSourceSet( JavaPluginExtension javaExtension, String sourceSetName, List parentSourceSets, - int javaVersion + int javaVersion, + boolean isMainSourceSet ) { SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourceSetName); for (String parentSourceSetName : parentSourceSets) { @@ -135,6 +136,13 @@ private SourceSet addSourceSet( CompileOptions compileOptions = compileTask.getOptions(); compileOptions.getRelease().set(javaVersion); }); + if (isMainSourceSet) { + project.getTasks().create(sourceSet.getJavadocTaskName(), Javadoc.class, javadocTask -> { + javadocTask.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(javaVersion)); + })); + }); + } configurePreviewFeatures(project, sourceSet, javaVersion); // Since we configure MRJAR sourcesets to allow preview apis, class signatures for those diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index f55d90933ed61..94fc6f2cb9025 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -9,7 +9,7 @@ ## should create one or more files in the jvm.options.d ## directory containing your adjustments. ## -## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/jvm-options.html +## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/advanced-configuration.html#set-jvm-options ## for more information. 
## ################################################################ diff --git a/docs/build.gradle b/docs/build.gradle index dec0de8ffa844..93b7277327280 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -130,8 +130,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' - // disable the ILM history for doc tests to avoid potential lingering tasks that'd cause test flakiness + // disable the ILM and SLM history for doc tests to avoid potential lingering tasks that'd cause test flakiness setting 'indices.lifecycle.history_index_enabled', 'false' + setting 'slm.history_index_enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.realms.file.file.order', '0' setting 'xpack.security.authc.realms.native.native.order', '1' diff --git a/docs/changelog/118266.yaml b/docs/changelog/118266.yaml new file mode 100644 index 0000000000000..1b14b12b973c5 --- /dev/null +++ b/docs/changelog/118266.yaml @@ -0,0 +1,5 @@ +pr: 118266 +summary: Prevent data nodes from sending stack traces to coordinator when `error_trace=false` +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/118585.yaml b/docs/changelog/118585.yaml new file mode 100644 index 0000000000000..4caa5efabbd33 --- /dev/null +++ b/docs/changelog/118585.yaml @@ -0,0 +1,7 @@ +pr: 118585 +summary: Add a generic `rescorer` retriever based on the search request's rescore + functionality +area: Ranking +type: feature +issues: + - 118327 diff --git a/docs/changelog/118757.yaml b/docs/changelog/118757.yaml new file mode 100644 index 0000000000000..956e220f21aeb --- /dev/null +++ b/docs/changelog/118757.yaml @@ -0,0 +1,5 @@ +pr: 118757 +summary: Improve handling of nested fields in index reader wrappers +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/118823.yaml b/docs/changelog/118823.yaml new file mode 100644 index 0000000000000..b1afe1c873c17 --- /dev/null +++ b/docs/changelog/118823.yaml @@ -0,0 +1,5 @@ +pr: 118823 +summary: Fix attribute set equals +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/116944.yaml b/docs/changelog/118825.yaml similarity index 84% rename from docs/changelog/116944.yaml rename to docs/changelog/118825.yaml index e7833e49cf965..23170ec4705da 100644 --- a/docs/changelog/116944.yaml +++ b/docs/changelog/118825.yaml @@ -1,4 +1,4 @@ -pr: 116944 +pr: 118825 summary: "Remove support for type, fields, `copy_to` and boost in metadata field definition" area: Mapping type: breaking @@ -6,6 +6,6 @@ issues: [] breaking: title: "Remove support for type, fields, copy_to and boost in metadata field definition" area: Mapping - details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition + details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition starting with version 9. impact: Users providing type, fields, copy_to or boost as part of metadata field definition should remove them from their mappings. 
notable: false diff --git a/docs/changelog/118890.yaml b/docs/changelog/118890.yaml new file mode 100644 index 0000000000000..d3fc17157f130 --- /dev/null +++ b/docs/changelog/118890.yaml @@ -0,0 +1,5 @@ +pr: 118890 +summary: Add action to create index from a source index +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119007.yaml b/docs/changelog/119007.yaml new file mode 100644 index 0000000000000..458101b68d454 --- /dev/null +++ b/docs/changelog/119007.yaml @@ -0,0 +1,6 @@ +pr: 119007 +summary: Block-writes cannot be added after read-only +area: Data streams +type: bug +issues: + - 119002 diff --git a/docs/reference/connector/docs/connectors-content-extraction.asciidoc b/docs/reference/connector/docs/connectors-content-extraction.asciidoc index a87d38c9bf531..744fe1d87cb45 100644 --- a/docs/reference/connector/docs/connectors-content-extraction.asciidoc +++ b/docs/reference/connector/docs/connectors-content-extraction.asciidoc @@ -8,7 +8,7 @@ The logic for content extraction is defined in {connectors-python}/connectors/ut While intended primarily for PDF and Microsoft Office formats, you can use any of the <>. Enterprise Search uses an {ref}/ingest.html[Elasticsearch ingest pipeline^] to power the web crawler's binary content extraction. -The default pipeline, `ent-search-generic-ingestion`, is automatically created when Enterprise Search first starts. +The default pipeline, `search-default-ingestion`, is automatically created when Enterprise Search first starts. You can {ref}/ingest.html#create-manage-ingest-pipelines[view^] this pipeline in Kibana. Customizing your pipeline usage is also an option. diff --git a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc index 278478c908bf0..62a99928bfb46 100644 --- a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc +++ b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc @@ -13,7 +13,7 @@ The following diagram provides an overview of how content extraction, sync rules [.screenshot] image::images/pipelines-extraction-sync-rules.png[Architecture diagram of data pipeline with content extraction, sync rules, and ingest pipelines] -By default, only the connector specific logic (2) and the default `ent-search-generic-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. +By default, only the connector specific logic (2) and the default `search-default-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. The following tools are available for more advanced use cases: @@ -50,4 +50,4 @@ Use ingest pipelines for data enrichment, normalization, and more. Elastic connectors use a default ingest pipeline, which you can copy and customize to meet your needs. -Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation. \ No newline at end of file +Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation. 
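As a quick illustration of the pipeline rename above: the managed pipeline is an ordinary ingest pipeline, so it can be inspected with the standard get-pipeline API. A minimal console sketch, assuming a cluster on which the managed pipeline has already been created (only the pipeline name comes from this patch):

[source,console]
----
GET _ingest/pipeline/search-default-ingestion
----

The response lists the managed pipeline's processor definitions, matching the reference documentation updated later in this diff.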
diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index 4afea30660339..264efc191748f 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -116,4 +116,18 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression] |=== include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result] |=== +Sometimes you need to change the start value of each bucket by a given duration (similar to date histogram +aggregation's <> parameter). To do so, you will need to +take into account how the language handles expressions within the `STATS` command: if these contain functions or +arithmetic operators, a virtual `EVAL` is inserted before and/or after the `STATS` command. Consequently, a double +compensation is needed to adjust the bucketed date value before the aggregation and then again after. For instance, +inserting a negative offset of `1 hour` to buckets of `1 year` looks like this: +[source.merge.styled,esql] +---- +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset-result] +|=== diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 18802f5ff8fef..3d96de05c8407 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -1598,7 +1598,8 @@ "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b", "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())", "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket", - "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2" + "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2", + "FROM employees \n| STATS dates = VALUES(birth_date) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count\n| LIMIT 3" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index 006cc96294477..73642b3bb3447 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -88,7 +88,7 @@ The `monitor_ml` <> is req To create the index-specific ML inference pipeline, go to *Search -> Content -> Indices -> -> Pipelines* in the Kibana UI. -If you only see the `ent-search-generic-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. 
+If you only see the `search-default-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. This will create the `{index_name}@ml-inference` pipeline. Once your index-specific ML inference pipeline is ready, you can add inference processors that use your ML trained models. diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index e414dacaab964..272c6ba2884b9 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -40,7 +40,7 @@ Considerations such as error handling, conditional execution, sequencing, versio To this end, when you create indices for search use cases, (including {enterprise-search-ref}/crawler.html[Elastic web crawler], <>. , and API indices), each index already has a pipeline set up with several processors that optimize your content for search. -This pipeline is called `ent-search-generic-ingestion`. +This pipeline is called `search-default-ingestion`. While it is a "managed" pipeline (meaning it should not be tampered with), you can view its details via the Kibana UI or the Elasticsearch API. You can also <>. @@ -56,14 +56,14 @@ This will not affect existing indices. Each index also provides the capability to easily create index-specific ingest pipelines with customizable processing. If you need that extra flexibility, you can create a custom pipeline by going to your pipeline settings and choosing to "copy and customize". -This will replace the index's use of `ent-search-generic-ingestion` with 3 newly generated pipelines: +This will replace the index's use of `search-default-ingestion` with 3 newly generated pipelines: 1. `` 2. `@custom` 3. `@ml-inference` -Like `ent-search-generic-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. -You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also +Like `search-default-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. +You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also <>. [discrete#ingest-pipeline-search-pipeline-settings] @@ -123,7 +123,7 @@ If the pipeline is not specified, the underscore-prefixed fields will actually b === Details [discrete#ingest-pipeline-search-details-generic-reference] -==== `ent-search-generic-ingestion` Reference +==== `search-default-ingestion` Reference You can access this pipeline with the <> or via Kibana's < Ingest Pipelines>> UI. @@ -149,7 +149,7 @@ If you want to make customizations, we recommend you utilize index-specific pipe [discrete#ingest-pipeline-search-details-generic-reference-params] ===== Control flow parameters -The `ent-search-generic-ingestion` pipeline does not always run all processors. +The `search-default-ingestion` pipeline does not always run all processors. It utilizes a feature of ingest pipelines to <> based on the contents of each individual document. * `_extract_binary_content` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `attachment`, `set_body`, and `remove_replacement_chars` processors. @@ -167,8 +167,8 @@ See <>.
==== Index-specific ingest pipelines In the Kibana UI for your index, by clicking on the Pipelines tab, then *Settings > Copy and customize*, you can quickly generate 3 pipelines which are specific to your index. -These 3 pipelines replace `ent-search-generic-ingestion` for the index. -There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `ent-search-generic-ingestion` pipeline. +These 3 pipelines replace `search-default-ingestion` for the index. +There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `search-default-ingestion` pipeline. [IMPORTANT] ==== @@ -179,7 +179,7 @@ Refer to the Elastic subscriptions pages for https://www.elastic.co/subscription [discrete#ingest-pipeline-search-details-specific-reference] ===== `` Reference -This pipeline looks and behaves a lot like the <>, but with <>. +This pipeline looks and behaves a lot like the <>, but with <>. [WARNING] ========================= @@ -197,7 +197,7 @@ If you want to make customizations, we recommend you utilize <>, the index-specific pipeline also defines: +In addition to the processors inherited from the <>, the index-specific pipeline also defines: * `index_ml_inference_pipeline` - this uses the <> processor to run the `@ml-inference` pipeline. This processor will only be run if the source document includes a `_run_ml_inference` field with the value `true`. @@ -206,7 +206,7 @@ In addition to the processors inherited from the <` pipeline does not always run all processors. +Like the `search-default-ingestion` pipeline, the `` pipeline does not always run all processors. In addition to the `_extract_binary_content` and `_reduce_whitespace` control flow parameters, the `` pipeline also supports: * `_run_ml_inference` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `index_ml_inference_pipeline` processor. @@ -220,7 +220,7 @@ See <>. ===== `@ml-inference` Reference This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". It's possible to add one or more ML inference pipelines to an index in the *Content* UI. This pipeline will serve as a container for all of the ML inference pipelines configured for the index. @@ -241,7 +241,7 @@ The `monitor_ml` Elasticsearch cluster permission is required in order to manage This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". You are encouraged to make additions and edits to this pipeline, provided its name remains the same. This provides a convenient hook from which to add custom processing and transformations for your data. @@ -272,9 +272,12 @@ extraction. These changes should be re-applied to each index's `@custom` pipeline in order to ensure a consistent data processing experience. 
In 8.5+, the <> is required *in addition* to the configurations mentioned in the {enterprise-search-ref}/crawler-managing.html#crawler-managing-binary-content[Elastic web crawler Guide]. -* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices will all make use of this pipeline by default. +* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices all made use of this pipeline by default. + This pipeline evolved into the `search-default-ingestion` pipeline. + +* `search-default-ingestion` - Since 9.0, Connectors have made use of this pipeline by default. You can <> above. - As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `ent-search-generic-ingestion`. + As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `search-default-ingestion`. Instead, if such customizations are desired, you should utilize <>, placing all modifications in the `@custom` pipeline(s). ============= diff --git a/docs/reference/ingest/search-nlp-tutorial.asciidoc b/docs/reference/ingest/search-nlp-tutorial.asciidoc index afdceeeb8bac2..b23a15c96b1a2 100644 --- a/docs/reference/ingest/search-nlp-tutorial.asciidoc +++ b/docs/reference/ingest/search-nlp-tutorial.asciidoc @@ -164,8 +164,8 @@ Now it's time to create an inference pipeline. 1. From the overview page for your `search-photo-comments` index in "Search", click the *Pipelines* tab. By default, Elasticsearch does not create any index-specific ingest pipelines. -2. Because we want to customize these pipelines, we need to *Copy and customize* the `ent-search-generic-ingestion` ingest pipeline. -Find this option above the settings for the `ent-search-generic-ingestion` ingest pipeline. +2. Because we want to customize these pipelines, we need to *Copy and customize* the `search-default-ingestion` ingest pipeline. +Find this option above the settings for the `search-default-ingestion` ingest pipeline. This will create two new index-specific ingest pipelines. Next, we'll add an inference pipeline. diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index f20e9148bf5e7..c7df40ff5e070 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -22,6 +22,9 @@ A <> that replaces the functionality of a traditi `knn`:: A <> that replaces the functionality of a <>. +`rescorer`:: +A <> that replaces the functionality of the <>. + `rrf`:: A <> that produces top documents from <>. @@ -371,6 +374,122 @@ GET movies/_search ---- // TEST[skip:uses ELSER] +[[rescorer-retriever]] +==== Rescorer Retriever + +The `rescorer` retriever re-scores only the results produced by its child retriever. +For the `standard` and `knn` retrievers, the `window_size` parameter specifies the number of documents examined per shard. + +For compound retrievers like `rrf`, the `window_size` parameter defines the total number of documents examined globally. + +When using the `rescorer`, an error is returned if the following conditions are not met: + +* The minimum configured rescore's `window_size` is: +** Greater than or equal to the `size` of the parent retriever for nested `rescorer` setups. +** Greater than or equal to the `size` of the search request when used as the primary retriever in the tree. 
+ +* And the maximum rescore's `window_size` is: +** Smaller than or equal to the `size` or `rank_window_size` of the child retriever. + +[discrete] +[[rescorer-retriever-parameters]] +===== Parameters + +`rescore`:: +(Required. <>) ++ +Defines the <> applied sequentially to the top documents returned by the child retriever. + +`retriever`:: +(Required. <>) ++ +Specifies the child retriever responsible for generating the initial set of top documents to be re-ranked. + +`filter`:: +(Optional. <>) ++ +Applies a <> to the retriever, ensuring that all documents match the filter criteria without affecting their scores. + +[discrete] +[[rescorer-retriever-example]] +==== Example + +The `rescorer` retriever can be placed at any level within the retriever tree. +The following example demonstrates a `rescorer` applied to the results produced by an `rrf` retriever: + +[source,console] +---- +GET movies/_search +{ + "size": 10, <1> + "retriever": { + "rescorer": { <2> + "rescore": { + "query": { <3> + "window_size": 50, <4> + "rescore_query": { + "script_score": { + "script": { + "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + "params": { + "queryVector": [-0.5, 90.0, -10, 14.8, -156.0] + } + } + } + } + } + }, + "retriever": { <5> + "rrf": { + "rank_window_size": 100, <6> + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [10, 22, 77], + "k": 10, + "num_candidates": 10 + } + } + ] + } + } + } + } +} +---- +// TEST[skip:uses ELSER] +<1> Specifies the number of top documents to return in the final response. +<2> A `rescorer` retriever applied as the final step. +<3> The definition of the `query` rescorer. +<4> Defines the number of documents to rescore from the child retriever. +<5> Specifies the child retriever definition. 
+<6> Defines the number of documents returned by the `rrf` retriever, which limits the available documents to rescore. + [[text-similarity-reranker-retriever]] ==== Text Similarity Re-ranker Retriever @@ -777,4 +896,4 @@ When a retriever is specified as part of a search, the following elements are no * <> * <> * <> -* <> +* <> use a <> instead diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index a6b8a31fc3894..25f4e97bd12ee 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -11,6 +11,7 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; +import java.util.List; public interface EntitlementChecker { @@ -29,4 +30,10 @@ public interface EntitlementChecker { void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent); void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory); + + // Process creation + void check$$start(Class callerClass, ProcessBuilder that, ProcessBuilder.Redirect[] redirects); + + void check$java_lang_ProcessBuilder$startPipeline(Class callerClass, List builders); + } diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 1ac4a7506eacb..3cc4b97e9bfea 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -29,43 +29,47 @@ import java.util.stream.Collectors; import static java.util.Map.entry; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.deniedToPlugins; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins; import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestEntitlementsCheckAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class); private final String prefix; - private record CheckAction(Runnable action, boolean isServerOnly) { - - static CheckAction serverOnly(Runnable action) { + record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) { + /** + * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case. + * Used both for always-denied entitlements as well as those granted only to the server itself.
+ */ + static CheckAction deniedToPlugins(Runnable action) { return new CheckAction(action, true); } - static CheckAction serverAndPlugin(Runnable action) { + static CheckAction forPlugins(Runnable action) { return new CheckAction(action, false); } } private static final Map checkActions = Map.ofEntries( - entry("runtime_exit", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeExit)), - entry("runtime_halt", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeHalt)), - entry("create_classloader", CheckAction.serverAndPlugin(RestEntitlementsCheckAction::createClassLoader)) + entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), + entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), + // entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), + entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)) ); @SuppressForbidden(reason = "Specifically testing Runtime.exit") private static void runtimeExit() { - logger.info("Calling Runtime.exit;"); Runtime.getRuntime().exit(123); } @SuppressForbidden(reason = "Specifically testing Runtime.halt") private static void runtimeHalt() { - logger.info("Calling Runtime.halt;"); Runtime.getRuntime().halt(123); } private static void createClassLoader() { - logger.info("Calling new URLClassLoader"); try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); } catch (IOException e) { @@ -73,6 +77,18 @@ private static void createClassLoader() { } } + private static void processBuilder_start() { + // TODO: processBuilder().start(); + } + + private static void processBuilder_startPipeline() { + try { + ProcessBuilder.startPipeline(List.of()); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + public RestEntitlementsCheckAction(String prefix) { this.prefix = prefix; } @@ -80,7 +96,7 @@ public RestEntitlementsCheckAction(String prefix) { public static Set getServerAndPluginsCheckActions() { return checkActions.entrySet() .stream() - .filter(kv -> kv.getValue().isServerOnly() == false) + .filter(kv -> kv.getValue().isAlwaysDeniedToPlugins() == false) .map(Map.Entry::getKey) .collect(Collectors.toSet()); } @@ -112,6 +128,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } return channel -> { + logger.info("Calling check action [{}]", actionName); checkAction.action().run(); channel.sendResponse(new RestResponse(RestStatus.OK, Strings.format("Successfully executed action [%s]", actionName))); }; diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java index d65981c30f0be..82146e6a87759 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class
EntitlementAllowedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java index d81e23e311be1..8649daf272e70 100644 --- a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementAllowedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java index 0f908d84260fb..7ca89c735a602 100644 --- a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java index 0ed27e2e576e7..2a2fd35d47cf3 100644 --- a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java +++ b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 9118f67cdc145..8e4cddc4d63ee 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -53,6 +53,7 @@ public class EntitlementInitialization { private static final String POLICY_FILE_NAME = "entitlement-policy.yaml"; + private static final Module ENTITLEMENTS_MODULE = PolicyManager.class.getModule(); private static ElasticsearchEntitlementChecker manager; @@ -92,7 +93,7 @@ private static PolicyManager createPolicyManager() throws IOException { "server", List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) ); - return new PolicyManager(serverPolicy, pluginPolicies, 
EntitlementBootstrap.bootstrapArgs().pluginResolver()); + return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver(), ENTITLEMENTS_MODULE); } private static Map createPluginPolicies(Collection pluginData) throws IOException { diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index a5ca0543ad15a..75365fbb74d65 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -14,6 +14,7 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; +import java.util.List; /** * Implementation of the {@link EntitlementChecker} interface, providing additional @@ -67,4 +68,14 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { ) { policyManager.checkCreateClassLoader(callerClass); } + + @Override + public void check$$start(Class callerClass, ProcessBuilder processBuilder, ProcessBuilder.Redirect[] redirects) { + policyManager.checkStartProcess(callerClass); + } + + @Override + public void check$java_lang_ProcessBuilder$startPipeline(Class callerClass, List builders) { + policyManager.checkStartProcess(callerClass); + } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 8d3efe4eb98e6..e06f7768eb8be 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -15,6 +15,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import java.lang.StackWalker.StackFrame; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; import java.util.ArrayList; @@ -29,6 +30,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; +import static java.util.Objects.requireNonNull; +import static java.util.function.Predicate.not; + public class PolicyManager { private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class); @@ -63,6 +68,11 @@ public Stream getEntitlements(Class entitlementCla private static final Set systemModules = findSystemModules(); + /** + * Frames originating from this module are ignored in the permission logic. 
+ */ + private final Module entitlementsModule; + private static Set findSystemModules() { var systemModulesDescriptors = ModuleFinder.ofSystem() .findAll() @@ -77,19 +87,44 @@ private static Set findSystemModules() { .collect(Collectors.toUnmodifiableSet()); } - public PolicyManager(Policy defaultPolicy, Map pluginPolicies, Function, String> pluginResolver) { - this.serverEntitlements = buildScopeEntitlementsMap(Objects.requireNonNull(defaultPolicy)); - this.pluginsEntitlements = Objects.requireNonNull(pluginPolicies) - .entrySet() + public PolicyManager( + Policy defaultPolicy, + Map pluginPolicies, + Function, String> pluginResolver, + Module entitlementsModule + ) { + this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(defaultPolicy)); + this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); this.pluginResolver = pluginResolver; + this.entitlementsModule = entitlementsModule; } private static Map> buildScopeEntitlementsMap(Policy policy) { return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements)); } + public void checkStartProcess(Class callerClass) { + neverEntitled(callerClass, "start process"); + } + + private void neverEntitled(Class callerClass, String operationDescription) { + var requestingModule = requestingModule(callerClass); + if (isTriviallyAllowed(requestingModule)) { + return; + } + + throw new NotEntitledException( + Strings.format( + "Not entitled: caller [%s], module [%s], operation [%s]", + callerClass, + requestingModule.getName(), + operationDescription + ) + ); + } + public void checkExitVM(Class callerClass) { checkEntitlementPresent(callerClass, ExitVMEntitlement.class); } @@ -185,7 +220,16 @@ private static boolean isServerModule(Module requestingModule) { return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot(); } - private static Module requestingModule(Class callerClass) { + /** + * Walks the stack to determine which module's entitlements should be checked. + * + * @param callerClass when non-null will be used if its module is suitable; + * this is a fast-path check that can avoid the stack walk + * in cases where the caller class is available. + * @return the requesting module, or {@code null} if the entire call stack + * comes from modules that are trusted. + */ + Module requestingModule(Class callerClass) { if (callerClass != null) { Module callerModule = callerClass.getModule(); if (systemModules.contains(callerModule) == false) { @@ -193,21 +237,34 @@ private static Module requestingModule(Class callerClass) { return callerModule; } } - int framesToSkip = 1 // getCallingClass (this method) - + 1 // the checkXxx method - + 1 // the runtime config method - + 1 // the instrumented method - ; - Optional module = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE) - .walk( - s -> s.skip(framesToSkip) - .map(f -> f.getDeclaringClass().getModule()) - .filter(m -> systemModules.contains(m) == false) - .findFirst() - ); + Optional module = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) + .walk(frames -> findRequestingModule(frames.map(StackFrame::getDeclaringClass))); return module.orElse(null); } + /** + * Given a stream of classes corresponding to the frames from a {@link StackWalker}, + * returns the module whose entitlements should be checked. 
+ * + * @throws NullPointerException if the requesting module is {@code null} + */ + Optional findRequestingModule(Stream> classes) { + return classes.map(Objects::requireNonNull) + .map(PolicyManager::moduleOf) + .filter(m -> m != entitlementsModule) // Ignore the entitlements library itself + .filter(not(systemModules::contains)) // Skip trusted JDK modules + .findFirst(); + } + + private static Module moduleOf(Class c) { + var result = c.getModule(); + if (result == null) { + throw new NullPointerException("Entitlements system does not support non-modular class [" + c.getName() + "]"); + } else { + return result; + } + } + private static boolean isTriviallyAllowed(Module requestingModule) { if (requestingModule == null) { logger.debug("Entitlement trivially allowed: entire call stack is composed of classes in system modules"); diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index 45bdf2e457824..0789fcc8dc770 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; import static java.util.Map.entry; import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; @@ -37,11 +38,14 @@ @ESTestCase.WithoutSecurityManager public class PolicyManagerTests extends ESTestCase { + private static final Module NO_ENTITLEMENTS_MODULE = null; + public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.of("plugin1", createPluginPolicy("plugin.module")), - c -> "plugin1" + c -> "plugin1", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -62,7 +66,7 @@ public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { } public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); @@ -82,7 +86,7 @@ public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { } public void testGetEntitlementsFailureIsCached() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); @@ -103,7 +107,8 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -115,7 +120,7 @@ public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { } public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { - var policyManager = new
PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null); + var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -138,7 +143,7 @@ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotF } public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null); + var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -155,12 +160,13 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException { final Path home = createTempDir(); - Path jar = creteMockPluginJar(home); + Path jar = createMockPluginJar(home); var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), - c -> "mock-plugin" + c -> "mock-plugin", + NO_ENTITLEMENTS_MODULE ); var layer = createLayerForJar(jar, "org.example.plugin"); @@ -179,7 +185,8 @@ public void testGetEntitlementsResultIsCached() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do @@ -197,6 +204,73 @@ public void testGetEntitlementsResultIsCached() { assertThat(entitlementsAgain, sameInstance(cachedResult)); } + public void testRequestingModuleFastPath() throws IOException, ClassNotFoundException { + var callerClass = makeClassInItsOwnModule(); + assertEquals(callerClass.getModule(), policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingModule(callerClass)); + } + + public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException { + var requestingClass = makeClassInItsOwnModule(); + var runtimeClass = makeClassInItsOwnModule(); // A class in the entitlements library itself + var ignorableClass = makeClassInItsOwnModule(); + var systemClass = Object.class; + + var policyManager = policyManagerWithEntitlementsModule(runtimeClass.getModule()); + + var requestingModule = requestingClass.getModule(); + + assertEquals( + "Skip one system frame", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, requestingClass, ignorableClass)).orElse(null) + ); + assertEquals( + "Skip multiple system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, systemClass, systemClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertEquals( + "Skip system frame between runtime frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(runtimeClass, systemClass, runtimeClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertEquals( + "Skip runtime frame between system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(systemClass, runtimeClass, systemClass, requestingClass, ignorableClass)) + 
.orElse(null) + ); + assertEquals( + "No system frames", + requestingModule, + policyManager.findRequestingModule(Stream.of(requestingClass, ignorableClass)).orElse(null) + ); + assertEquals( + "Skip runtime frames up to the first system frame", + requestingModule, + policyManager.findRequestingModule(Stream.of(runtimeClass, runtimeClass, systemClass, requestingClass, ignorableClass)) + .orElse(null) + ); + assertThrows( + "Non-modular caller frames are not supported", + NullPointerException.class, + () -> policyManager.findRequestingModule(Stream.of(systemClass, null)) + ); + } + + private static Class makeClassInItsOwnModule() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + Path jar = createMockPluginJar(home); + var layer = createLayerForJar(jar, "org.example.plugin"); + return layer.findLoader("org.example.plugin").loadClass("q.B"); + } + + private static PolicyManager policyManagerWithEntitlementsModule(Module entitlementsModule) { + return new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "test", entitlementsModule); + } + private static Policy createEmptyTestServerPolicy() { return new Policy("server", List.of()); } @@ -219,7 +293,7 @@ private static Policy createPluginPolicy(String... pluginModules) { ); } - private static Path creteMockPluginJar(Path home) throws IOException { + private static Path createMockPluginJar(Path home) throws IOException { Path jar = home.resolve("mock-plugin.jar"); Map sources = Map.ofEntries( diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java index 5249077bdfdbb..7adf6a09e9a19 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java @@ -96,6 +96,8 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception { } logger.info("Executing search"); + // we have to explicitly set error_trace=true for the later exception check for `TimeSeriesIndexSearcher` + client().threadPool().getThreadContext().putHeader("error_trace", "true"); TimeSeriesAggregationBuilder timeSeriesAggregationBuilder = new TimeSeriesAggregationBuilder("test_agg"); ActionFuture searchResponse = prepareSearch("test").setQuery(matchAllQuery()) .addAggregation( diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index a97154fd4d1ff..c980aaba71444 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -101,7 +101,12 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; import 
org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -134,6 +139,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisPlugin.class); + private final SetOnce scriptServiceHolder = new SetOnce<>(); private final SetOnce synonymsManagementServiceHolder = new SetOnce<>(); @@ -224,6 +231,28 @@ public Map> getTokenFilters() { filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); + filters.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + return new EdgeNGramTokenFilterFactory(indexSettings, environment, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); + } else { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_deprecation", + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead." + ); + } + return super.create(tokenStream); + } + + }; + }); filters.put("elision", requiresAnalysisSettings(ElisionTokenFilterFactory::new)); filters.put("fingerprint", FingerprintTokenFilterFactory::new); filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new); @@ -243,6 +272,28 @@ public Map> getTokenFilters() { filters.put("min_hash", MinHashTokenFilterFactory::new); filters.put("multiplexer", MultiplexerTokenFilterFactory::new); filters.put("ngram", NGramTokenFilterFactory::new); + filters.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + return new NGramTokenFilterFactory(indexSettings, environment, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." + ); + } else { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_deprecation", + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead." 
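// The legacy analysis names restored in this file all follow the same version
// gate: hard-fail for indices created on or after 8.0.0, and only emit a
// deprecation warning for older indices (the tokenizer variants further bound
// the warning at 7.6). A minimal sketch of the gate, reusing the constants
// above; the message text here is illustrative only:
//
//     IndexVersion created = indexSettings.getIndexVersionCreated();
//     if (created.onOrAfter(IndexVersions.V_8_0_0)) {
//         throw new IllegalArgumentException("legacy name cannot be used in new indices");
//     } else {
//         deprecationLogger.warn(DeprecationCategory.ANALYSIS, "legacy_name_key", "legacy name is deprecated");
//     }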
+ ); + } + return super.create(tokenStream); + } + + }; + }); filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); @@ -294,7 +345,39 @@ public Map> getTokenizers() { tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new); tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); tokenizers.put("thai", ThaiTokenizerFactory::new); + tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [ngram] for indices created in versions 8 or higher instead." + ); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead." + ); + } + return new NGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("ngram", NGramTokenizerFactory::new); + tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead." + ); + } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [edge_ngram] instead."
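// For illustration, index settings (hypothetical tokenizer name "my_tokenizer")
// that would hit the rejection above on an index created on 8.x, or the
// deprecation warning on one created on 7.6 through 7.x:
//
//     Settings.builder()
//         .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_8_0_0)
//         .put("index.analysis.tokenizer.my_tokenizer.type", "edgeNGram")
//         .build();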
+ ); + } + return new EdgeNGramTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); @@ -505,17 +588,53 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add( - PreConfiguredTokenizer.indexVersion( - "edge_ngram", - (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) - ) - ); + tokenizers.add(PreConfiguredTokenizer.indexVersion("edge_ngram", (version) -> { + if (version.onOrAfter(IndexVersions.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API // This is already broken with normalization, so backwards compat isn't necessary? tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new)); + + tokenizers.add(PreConfiguredTokenizer.indexVersion("nGram", (version) -> { + if (version.onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [nGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [ngram] for indices created in versions 8 or higher instead." + ); + } else if (version.onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "nGram_tokenizer_deprecation", + "The [nGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [ngram] instead." + ); + } + return new NGramTokenizer(); + })); + tokenizers.add(PreConfiguredTokenizer.indexVersion("edgeNGram", (version) -> { + if (version.onOrAfter(IndexVersions.V_8_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] tokenizer name was deprecated in 7.6. " + + "Please change the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead." + ); + } else if (version.onOrAfter(IndexVersions.V_7_6_0)) { + deprecationLogger.warn( + DeprecationCategory.ANALYSIS, + "edgeNGram_tokenizer_deprecation", + "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [edge_ngram] instead."
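// The V_7_3_0 branch below mirrors the rewritten "edge_ngram" registration
// above: indices created on or after 7.3 get NGramTokenizer's default gram
// sizes, while older indices keep EdgeNGramTokenizer's historical defaults.
// The concrete values are Lucene constants (min 1 / max 2 versus min 1 /
// max 1 at the time of writing), so treat the numbers as illustrative.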
+ ); + } + if (version.onOrAfter(IndexVersions.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); return tokenizers; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java new file mode 100644 index 0000000000000..9972d58b2dcc1 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.index.IndexVersionUtils; + +import java.io.IOException; +import java.util.Map; + +public class CommonAnalysisPluginTests extends ESTestCase { + + /** + * Check that the deprecated "nGram" filter throws exception for indices created since 8.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. 
" + + "Please change the filter name to [ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead." + ); + } + } + + /** + * Check that the deprecated "edgeNGram" filter throws exception for indices created since 7.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead." 
+ ); + } + } + + /** + * Check that we log a deprecation warning for "nGram" and "edgeNGram" tokenizer names with 7.6 and + * disallow usages for indices created after 8.0 + */ + public void testNGramTokenizerDeprecation() throws IOException { + // tests for prebuilt tokenizer + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + + // same batch of tests for custom tokenizer definition in the settings + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + } + + public void doTestPrebuiltTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenizers = createTestAnalysis( + IndexSettingsModule.newIndexSettings("index", 
settings), + settings, + commonAnalysisPlugin + ).tokenizer; + TokenizerFactory tokenizerFactory = tokenizers.get(deprecatedName); + + Tokenizer tokenizer = tokenizerFactory.create(); + assertNotNull(tokenizer); + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } + + public void doTestCustomTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "my_tokenizer") + .put("index.analysis.tokenizer.my_tokenizer.type", deprecatedName) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin); + + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 11d1653439e59..c998e927e25a8 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -34,7 +34,7 @@ public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase { - private static IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { + private IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) @@ -54,6 +54,7 @@ public void testPreConfiguredTokenizer() throws IOException { assertNotNull(analyzer); assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); } + } public void testCustomTokenChars() throws IOException { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 4fc6ca96b5f08..af57b8270ff02 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -118,7 +118,7 @@ public void testSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); 
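// The MINIMUM_COMPATIBLE -> MINIMUM_READONLY_COMPATIBLE swaps throughout this
// file appear to widen the sampled range down to the oldest index version the
// node can still read (rather than write). The sampling call itself is
// unchanged:
//
//     IndexVersion version = IndexVersionUtils.randomVersionBetween(
//         random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED);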
assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -177,7 +177,7 @@ public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -231,7 +231,7 @@ public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -338,7 +338,7 @@ public void testShingleFilters() { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") @@ -392,7 +392,7 @@ public void testPreconfiguredTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); @@ -424,7 +424,7 @@ public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java index 6bec8dc1ebc62..d30e9d3c68cc9 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java @@ -124,11 +124,7 @@ public void testOldVersionGetXUniqueTokenFilter() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.getPreviousVersion(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) - ) + IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) ) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java 
b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 2e7f0de027de7..37bff97a07ae2 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -9,8 +9,10 @@ package org.elasticsearch.painless.spi; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.lang.reflect.Constructor; @@ -140,7 +142,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep * } * } */ - public static Whitelist loadFromResourceFiles(Class resource, Map parsers, String... filepaths) { + public static Whitelist loadFromResourceFiles(Class owner, Map parsers, String... filepaths) { List whitelistClasses = new ArrayList<>(); List whitelistStatics = new ArrayList<>(); List whitelistClassBindings = new ArrayList<>(); @@ -153,7 +155,7 @@ public static Whitelist loadFromResourceFiles(Class resource, Map resource, Map) resource::getClassLoader); + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction) owner::getClassLoader); return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings, Collections.emptyList()); } + private static InputStream getResourceAsStream(Class owner, String name) { + InputStream stream = owner.getResourceAsStream(name); + if (stream == null) { + String msg = "Whitelist file [" + + owner.getPackageName().replace(".", "/") + + "/" + + name + + "] not found from owning class [" + + owner.getName() + + "]."; + if (owner.getModule().isNamed()) { + msg += " Check that the file exists and the package [" + + owner.getPackageName() + + "] is opened " + + "to module " + + WhitelistLoader.class.getModule().getName(); + } + throw new ResourceNotFoundException(msg); + } + return stream; + } + private static List parseWhitelistAnnotations(Map parsers, String line) { List annotations; diff --git a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java index e62d0b438b098..b46bc118e0913 100644 --- a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java +++ b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistLoader; @@ -17,10 +18,18 @@ import org.elasticsearch.painless.spi.annotation.NoImportAnnotation; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import java.lang.ModuleLayer.Controller; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + public class WhitelistLoaderTests extends ESTestCase { public void testUnknownAnnotations() { @@ -96,4 +105,52 @@ public void testAnnotations() { 
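// Context for the new getResourceAsStream helper above and the tests below:
// broadly, Class#getResourceAsStream returns null inside a named module when
// the resource's package is not opened to the caller's module, which is the
// situation the new error message describes. The runtime fix demonstrated by
// the module test boils down to (names taken from that test):
//
//     controller.addOpens(module, "p", WhitelistLoader.class.getModule());
//     Whitelist whitelist = WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt");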
assertEquals(3, count); } + + public void testMissingWhitelistResource() { + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(Whitelist.class, "missing.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [org/elasticsearch/painless/spi/missing.txt] not found" + + " from owning class [org.elasticsearch.painless.spi.Whitelist]." + ) + ); + } + + public void testMissingWhitelistResourceInModule() throws Exception { + Map sources = new HashMap<>(); + sources.put("module-info", "module m {}"); + sources.put("p.TestOwner", "package p; public class TestOwner { }"); + var classToBytes = InMemoryJavaCompiler.compile(sources); + + Path dir = createTempDir(getTestName()); + Path jar = dir.resolve("m.jar"); + Map jarEntries = new HashMap<>(); + jarEntries.put("module-info.class", classToBytes.get("module-info")); + jarEntries.put("p/TestOwner.class", classToBytes.get("p.TestOwner")); + jarEntries.put("p/resource.txt", "# test resource".getBytes(StandardCharsets.UTF_8)); + JarUtils.createJarWithEntries(jar, jarEntries); + + try (var loader = JarUtils.loadJar(jar)) { + Controller controller = JarUtils.loadModule(jar, loader.classloader(), "m"); + Module module = controller.layer().findModule("m").orElseThrow(); + + Class ownerClass = module.getClassLoader().loadClass("p.TestOwner"); + + // first check we get a nice error message when accessing the resource + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [p/resource.txt] not found from owning class [p.TestOwner]." + + " Check that the file exists and the package [p] is opened to module null" + ) + ); + + // now check we can actually read it once the package is opened to us + controller.addOpens(module, "p", WhitelistLoader.class.getModule()); + var whitelist = WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt"); + assertThat(whitelist, notNullValue()); + } + } } diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java index 37c31c8af47b0..d2dd5b7442dd2 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java @@ -45,6 +45,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java index 918c343b79b7b..73b7c07c45fe5 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java @@ -41,7 +41,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return 
IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 9b83cd9ffdb2b..bd5b289abc588 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -344,8 +343,6 @@ public void testParsePolygon() throws IOException, ParseException { assertGeometryEquals(p, polygonGeoJson, false); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParse3DPolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 5d0df9215ef25..f944a368b2a6c 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.MultiLine; @@ -303,8 +302,6 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep assertThat(e, hasToString(containsString("coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -338,8 +335,6 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParsePolyWithStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0, 0)); @@ -363,8 +358,6 @@ public void testParsePolyWithStoredZ() throws IOException { assertEquals(shapeBuilder.numDimensions(), 3); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseOpenPolygon() throws IOException { String 
openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java index 7352b4d88a42b..c97b0a28d22de 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -21,7 +20,6 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -56,8 +54,6 @@ import static org.mockito.Mockito.when; @SuppressWarnings("deprecation") -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldMapperTests extends MapperTestCase { @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java index bf616c8190324..f5e09f19c1a71 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java @@ -8,9 +8,7 @@ */ package org.elasticsearch.legacygeo.mapper; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.FieldTypeTestCase; @@ -23,8 +21,6 @@ import java.util.List; import java.util.Map; -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldTypeTests extends FieldTypeTestCase { /** diff --git a/muted-tests.yml b/muted-tests.yml index 2677b3e75d86e..12f1fc510a332 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -238,8 +238,6 @@ tests: - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT method: test {p0=data_stream/120_data_streams_stats/Multiple data stream} issue: https://github.com/elastic/elasticsearch/issues/118217 -- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/118224 - class: org.elasticsearch.packaging.test.ArchiveTests method: test60StartAndStop issue: https://github.com/elastic/elasticsearch/issues/118216 @@ -290,6 +288,19 @@ tests: - class: org.elasticsearch.cluster.service.MasterServiceTests method: testThreadContext issue: 
https://github.com/elastic/elasticsearch/issues/118914 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + method: test {yaml=indices.create/20_synthetic_source/create index with use_synthetic_source} + issue: https://github.com/elastic/elasticsearch/issues/118955 +- class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT + issue: https://github.com/elastic/elasticsearch/issues/118970 +- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests + method: testInvalidToken + issue: https://github.com/elastic/elasticsearch/issues/119019 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} + issue: https://github.com/elastic/elasticsearch/issues/116777 +- class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT + issue: https://github.com/elastic/elasticsearch/issues/115727 # Examples: # diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java index 483c8ccef1202..e51d1f24a88ad 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java @@ -44,7 +44,7 @@ public void testDisallowedWithSynonyms() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java new file mode 100644 index 0000000000000..6f9ab8ccdfdec --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.http; + +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NByteArrayEntity; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Request; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; + +public class SearchErrorTraceIT extends HttpSmokeTestCase { + private AtomicBoolean hasStackTrace; + + @Before + private void setupMessageListener() { + internalCluster().getDataNodeInstances(TransportService.class).forEach(ts -> { + ts.addMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + TransportMessageListener.super.onResponseSent(requestId, action, error); + if (action.startsWith("indices:data/read/search")) { + Optional throwable = ExceptionsHelper.unwrapCausesAndSuppressed( + error, + t -> t.getStackTrace().length > 0 + ); + hasStackTrace.set(throwable.isPresent()); + } + } + }); + }); + } + + private void setupIndexWithDocs() { + createIndex("test1", "test2"); + indexRandom( + true, + prepareIndex("test1").setId("1").setSource("field", "foo"), + prepareIndex("test2").setId("10").setSource("field", 5) + ); + refresh(); + } + + public void testSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + assertTrue(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request 
searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + assertTrue(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + + assertFalse(hasStackTrace.get()); + } +} diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index bdee32e596c4c..f23b5460f7d53 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -70,4 +70,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions") task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions") task.skipTest("synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set", "Can't work until auto-expand replicas is 0-1 for synonyms index") + task.skipTest("search/90_search_after/_shard_doc sort", "restriction has been lifted in latest versions") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index fc0bfa144bbc4..edb684168278b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -2049,5 +2049,6 @@ create index with use_synthetic_source: indices.disk_usage: index: test run_expensive_tasks: true - - gt: { test.fields.field.total_in_bytes: 0 } - - is_false: test.fields.field._recovery_source + flush: false + - gt: { 
test.store_size_in_bytes: 0 } + - is_false: test.fields._recovery_source diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml new file mode 100644 index 0000000000000..2c16de61c6b15 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml @@ -0,0 +1,225 @@ +setup: + - requires: + cluster_features: [ "search.retriever.rescorer.enabled" ] + reason: "Support for rescorer retriever" + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + available: + type: boolean + features: + type: rank_features + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {"_id": 1 }}' + - '{"features": { "first_stage": 1, "second_stage": 10}, "available": true, "group": 1}' + - '{"index": {"_id": 2 }}' + - '{"features": { "first_stage": 2, "second_stage": 9}, "available": false, "group": 1}' + - '{"index": {"_id": 3 }}' + - '{"features": { "first_stage": 3, "second_stage": 8}, "available": false, "group": 3}' + - '{"index": {"_id": 4 }}' + - '{"features": { "first_stage": 4, "second_stage": 7}, "available": true, "group": 1}' + - '{"index": {"_id": 5 }}' + - '{"features": { "first_stage": 5, "second_stage": 6}, "available": true, "group": 3}' + - '{"index": {"_id": 6 }}' + - '{"features": { "first_stage": 6, "second_stage": 5}, "available": false, "group": 2}' + - '{"index": {"_id": 7 }}' + - '{"features": { "first_stage": 7, "second_stage": 4}, "available": true, "group": 3}' + - '{"index": {"_id": 8 }}' + - '{"features": { "first_stage": 8, "second_stage": 3}, "available": true, "group": 1}' + - '{"index": {"_id": 9 }}' + - '{"features": { "first_stage": 9, "second_stage": 2}, "available": true, "group": 2}' + - '{"index": {"_id": 10 }}' + - '{"features": { "first_stage": 10, "second_stage": 1}, "available": false, "group": 1}' + +--- +"Rescorer retriever basic": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 9.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 3 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: {} + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: {} + size: 2 + + - match: {hits.total.value: 10} + - match: {hits.hits.0._id: "8"} + - match: { hits.hits.0._score: 3.0 } + - match: {hits.hits.1._id: "9"} + - match: { hits.hits.1._score: 2.0 } + +--- +"Rescorer retriever with pre-filters": + - do: + search: + index: test + body: + retriever: + rescorer: + filter: + match: + available: true + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "1" } + 
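# Note: query_weight: 0 zeroes out the first-pass score, so each rescored hit's
# final _score equals its features.second_stage value; that is why the match
# assertions in these tests compare _score directly against second_stage.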
- match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "4" } + - match: { hits.hits.1._score: 7.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 4 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + filter: + match: + available: true + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.0._score: 6.0 } + - match: { hits.hits.1._id: "7" } + - match: { hits.hits.1._score: 4.0 } + +--- +"Rescorer retriever and collapsing": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + collapse: + field: group + size: 3 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1._score: 8.0 } + - match: { hits.hits.2._id: "6" } + - match: { hits.hits.2._score: 5.0 } + +--- +"Rescorer retriever and invalid window size": + - do: + catch: "/\\[rescorer\\] requires \\[window_size: 5\\] be greater than or equal to \\[size: 10\\]/" + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 5 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 10 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml index 1fefc8bffffa1..d3b2b5a412717 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml @@ -218,31 +218,6 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828740" } - match: {hits.hits.0.sort: [1571617804828740000] } - ---- -"_shard_doc sort": - - requires: - cluster_features: ["gte_v7.12.0"] - reason: _shard_doc sort was added in 7.12 - - - do: - indices.create: - index: test - - do: - index: - index: test - id: "1" - body: { id: 1, foo: bar, age: 18 } - - - do: - catch: /\[_shard_doc\] sort field cannot be used without \[point in time\]/ - search: - index: test - body: - size: 1 - sort: ["_shard_doc"] - search_after: [ 0L ] - --- "Format sort values": - requires: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index 47f96aebacd7d..fa2b053ead348 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testCreateCloneIndex() { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); int numPrimaryShards = randomIntBetween(1, 5); 
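// randomCompatibleWriteVersion (replacing randomCompatibleVersion here and in
// the other IT changes below) samples only index versions the current node can
// still write; presumably newly created indices, like the clone source here,
// cannot be pinned to a read-only-compatible version. The pinning pattern:
//
//     IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random());
//     prepareCreate("source").setSettings(
//         Settings.builder().put(indexSettings()).put("index.version.created", version)
//     ).get();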
prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 8391ab270b1d1..9ba6ac4bd9c58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -341,8 +341,8 @@ private static IndexMetadata indexMetadata(final Client client, final String ind return clusterStateResponse.getState().metadata().index(index); } - public void testCreateSplitIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + public void testCreateSplitIndex() { + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 07f9d9ee7b6c3..92e5eb8e046bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -48,7 +48,7 @@ public void testCanRecoverFromStoreWithoutPeerRecoveryRetentionLease() throws Ex Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleWriteVersion(random())) ) ); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 907f943e68422..6c67bd2a98606 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -56,7 +56,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); private IndexRequestBuilder indexCity(String idx, String name, String... 
latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 1ad7d1a11bea7..1de51d6df8197 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -49,7 +49,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); static Map expectedDocCountsForGeoHash = null; static Map multiValuedExpectedDocCountsForGeoHash = null; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 9988624f6a677..a55edf3782bcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -748,7 +748,7 @@ public void testDateWithoutOrigin() throws Exception { } public void testManyDocsLin() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject() .startObject("_doc") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index a7efb2fe0e68b..fbdcfe26d28ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.ParseField; @@ -840,6 +841,20 @@ public void testRescorePhaseWithInvalidSort() throws Exception { } } ); + + assertResponse( + prepareSearch().addSort(SortBuilders.scoreSort()) + .addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)) + .setTrackScores(true) + .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50), + response -> { + assertThat(response.getHits().getTotalHits().value(), equalTo(5L)); + assertThat(response.getHits().getHits().length, equalTo(5)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(101f)); + } + } + ); } record GroupDoc(String id, String group, float firstPassScore, float secondPassScore, boolean shouldFilter) {} @@ -879,6 +894,10 @@ public void testRescoreAfterCollapse() throws Exception { 
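// Aside: a sanity check on the equalTo(101f) assertion above. The query rescorer
// defaults to QueryRescoreMode.Total (addition), and matchAllQuery() scores 1.0 in
// both passes, so with the default query weight and the rescore weight set in the test:
float queryWeight = 1.0f;          // default
float rescoreQueryWeight = 100.0f; // setRescoreQueryWeight(100.0f)
float expected = queryWeight * 1.0f + rescoreQueryWeight * 1.0f; // == 101.0f for every hit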
.setQuery(fieldValueScoreQuery("firstPassScore")) .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore"))) .setCollapse(new CollapseBuilder("group")); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(5L)); assertThat(resp.getHits().getHits().length, equalTo(3)); @@ -958,6 +977,10 @@ public void testRescoreAfterCollapseRandom() throws Exception { .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore")).setQueryWeight(0f).windowSize(numGroups)) .setCollapse(new CollapseBuilder("group")) .setSize(Math.min(numGroups, 10)); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } long expectedNumHits = numHits; assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(expectedNumHits)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java index 2489889be19e5..8104e4ed7a825 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java @@ -32,6 +32,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 9b4e28055a988..a309fa81f6dc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -96,7 +96,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setupTestIndex() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java index 4b8f29f3cc9a5..aadefd9bd8018 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { @Override protected void setupSuiteScopeCluster() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked( diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java index e80678c4f5fc6..f55d4505f3f58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -45,7 +45,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testDistanceSortingMVFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -237,7 +237,7 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -299,7 +299,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -551,7 +551,7 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; double lon = -73.998776; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index aabca1b9333f8..d53c90a5d1e28 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -60,7 +60,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * |___________________________ * 1 2 3 4 5 6 7 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? 
IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -152,7 +152,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -225,7 +225,7 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * |______________________ * 1 2 3 4 5 6 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index d3e235f1cd82a..9bc2487f89b12 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -52,7 +52,10 @@ static TransportVersion def(int id) { @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); + public static final TransportVersion V_7_1_0 = def(7_01_00_99); + public static final TransportVersion V_7_2_0 = def(7_02_00_99); public static final TransportVersion V_7_3_0 = def(7_03_00_99); + public static final TransportVersion V_7_3_2 = def(7_03_02_99); public static final TransportVersion V_7_4_0 = def(7_04_00_99); public static final TransportVersion V_7_6_0 = def(7_06_00_99); public static final TransportVersion V_7_8_0 = def(7_08_00_99); @@ -140,6 +143,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS = def(8_808_00_0); public static final TransportVersion EQL_ALLOW_PARTIAL_SEARCH_RESULTS = def(8_809_00_0); public static final TransportVersion NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION = def(8_810_00_0); + public static final TransportVersion ERROR_TRACE_IN_TRANSPORT_HEADER = def(8_811_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index cfc2e1bcdaf2b..2041754bc2bcc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -456,7 +456,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -468,7 +469,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index a9e13b86a5159..6841cb5bead0a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -24,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.tracing.TraceContext; @@ -530,6 +532,17 @@ public String getHeader(String key) { return value; } + /** + * Returns the header for the given key or defaultValue if not present + */ + public String getHeaderOrDefault(String key, String defaultValue) { + String value = getHeader(key); + if (value == null) { + return defaultValue; + } + return value; + } + /** * Returns all of the request headers from the thread's context.
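// Aside: illustrative use of the new getHeaderOrDefault helper, mirroring how
// SearchService reads it later in this change (threadContext is assumed to be a
// ThreadContext in scope; an absent header is treated as "false"):
String errorTrace = threadContext.getHeaderOrDefault("error_trace", "false");
boolean sendStackTraces = Boolean.parseBoolean(errorTrace); // stack traces only when explicitly requested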
* Be advised, headers might contain credentials. @@ -589,6 +602,14 @@ public void putHeader(Map header) { threadLocal.set(threadLocal.get().putHeaders(header)); } + public void setErrorTraceTransportHeader(RestRequest r) { + // set whether data nodes should send back stack trace based on the `error_trace` query parameter + if (r.paramAsBoolean("error_trace", RestController.ERROR_TRACE_DEFAULT)) { + // We only set it if error_trace is true (defaults to false) to avoid sending useless bytes + putHeader("error_trace", "true"); + } + } + /** * Puts a transient header object into this context */ diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index 2811c7493a277..6c044ab999899 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -14,6 +14,8 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -53,6 +55,8 @@ **/ public final class IndexSortConfig { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndexSortConfig.class); + /** * The list of field names */ @@ -134,10 +138,14 @@ private static MultiValueMode parseMultiValueMode(String value) { // visible for tests final FieldSortSpec[] sortSpecs; + private final IndexVersion indexCreatedVersion; + private final String indexName; private final IndexMode indexMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); + this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); + this.indexName = indexSettings.getIndex().getName(); this.indexMode = indexSettings.getMode(); if (this.indexMode == IndexMode.TIME_SERIES) { @@ -230,7 +238,22 @@ public Sort buildIndexSort( throw new IllegalArgumentException(err); } if (Objects.equals(ft.name(), sortSpec.field) == false) { - throw new IllegalArgumentException("Cannot use alias [" + sortSpec.field + "] as an index sort field"); + if (this.indexCreatedVersion.onOrAfter(IndexVersions.V_7_13_0)) { + throw new IllegalArgumentException("Cannot use alias [" + sortSpec.field + "] as an index sort field"); + } else { + DEPRECATION_LOGGER.warn( + DeprecationCategory.MAPPINGS, + "index-sort-aliases", + "Index sort for index [" + + indexName + + "] defined on field [" + + sortSpec.field + + "] which resolves to field [" + + ft.name() + + "]. " + + "You will not be able to define an index sort over aliased fields in new indexes" + ); + } } boolean reverse = sortSpec.order == null ? 
false : (sortSpec.order == SortOrder.DESC); MultiValueMode mode = sortSpec.mode; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 31aa787c3f758..033742b3b57fc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -10,13 +10,17 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.function.Function; /** @@ -132,6 +136,8 @@ public final MetadataFieldMapper build(MapperBuilderContext context) { return build(); } + private static final Set UNSUPPORTED_PARAMETERS_8_6_0 = Set.of("type", "fields", "copy_to", "boost"); + public final void parseMetadataField(String name, MappingParserContext parserContext, Map fieldNode) { final Parameter[] params = getParameters(); Map> paramsMap = Maps.newHashMapWithExpectedSize(params.length); @@ -144,6 +150,22 @@ public final void parseMetadataField(String name, MappingParserContext parserCon final Object propNode = entry.getValue(); Parameter parameter = paramsMap.get(propName); if (parameter == null) { + IndexVersion indexVersionCreated = parserContext.indexVersionCreated(); + if (indexVersionCreated.before(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + && UNSUPPORTED_PARAMETERS_8_6_0.contains(propName)) { + if (indexVersionCreated.onOrAfter(IndexVersions.V_8_6_0)) { + // silently ignore type, and a few other parameters: sadly we've been doing this for a long time + deprecationLogger.warn( + DeprecationCategory.API, + propName, + "Parameter [{}] has no effect on metadata field [{}] and will be removed in future", + propName, + name + ); + } + iterator.remove(); + continue; + } throw new MapperParsingException("unknown parameter [" + propName + "] on metadata field [" + name + "]"); } parameter.parse(name, parserContext, propNode); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 17e56a392daff..5cfe1c104d45e 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -83,7 +83,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -553,9 +553,10 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, return settingsModule; } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + @UpdateForV10(owner = UpdateForV10.Owner.SEARCH_FOUNDATIONS) private static void addBwcSearchWorkerSettings(List> additionalSettings) { - // TODO remove the below settings, they are unused and only here to enable BwC for deployments that still use them + // 
The search workers thread pool has been removed in Elasticsearch 8.16.0. These settings are deprecated and have no effect. + // They are here only to enable BwC for deployments that still use them additionalSettings.add( Setting.intSetting("thread_pool.search_worker.queue_size", 0, Setting.Property.NodeScope, Setting.Property.DeprecatedWarning) ); diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 4564a37dacf4a..509086b982319 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -269,5 +269,4 @@ protected Set responseParams() { protected Set responseParams(RestApiVersion restApiVersion) { return responseParams(); } - } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 49fe794bbe615..49801499ea991 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -93,6 +93,7 @@ public class RestController implements HttpServerTransport.Dispatcher { public static final String STATUS_CODE_KEY = "es_rest_status_code"; public static final String HANDLER_NAME_KEY = "es_rest_handler_name"; public static final String REQUEST_METHOD_KEY = "es_rest_request_method"; + public static final boolean ERROR_TRACE_DEFAULT = false; static { try (InputStream stream = RestController.class.getResourceAsStream("/config/favicon.ico")) { @@ -638,7 +639,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel private static void validateErrorTrace(RestRequest request, RestChannel channel) { // error_trace cannot be used when we disable detailed errors // we consume the error_trace parameter first to ensure that it is always consumed - if (request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled() == false) { + if (request.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && channel.detailedErrorsEnabled() == false) { throw new IllegalArgumentException("error traces in responses are disabled."); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index d043974055667..0c359e0a4a053 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -37,6 +37,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; +import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; public final class RestResponse implements Releasable { @@ -143,7 +144,7 @@ public RestResponse(RestChannel channel, RestStatus status, Exception e) throws // switched in the xcontent rendering parameters.
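// Aside: the end-to-end error_trace flow assembled from the pieces in this change,
// as a simplified sketch (request and threadContext assumed in scope):
// 1. The REST layer rejects error_trace when detailed errors are disabled, then
//    copies the parameter into the thread context as a transport header:
if (request.paramAsBoolean("error_trace", RestController.ERROR_TRACE_DEFAULT)) {
    threadContext.putHeader("error_trace", "true"); // only set when true, to avoid sending useless bytes
}
// 2. The header travels with each shard-level request; data nodes on channels at or
//    above ERROR_TRACE_IN_TRANSPORT_HEADER strip stack traces from failures unless
//    it is "true" (see SearchService#maybeWrapListenerForStackTrace below).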
// For authorization problems (RestStatus.UNAUTHORIZED) we don't want to do this since this could // leak information to the caller who is unauthorized to make this call - if (params.paramAsBoolean("error_trace", false) && status != RestStatus.UNAUTHORIZED) { + if (params.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && status != RestStatus.UNAUTHORIZED) { params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 24fab92ced392..87b1a6b9c2fa8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -72,6 +72,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } final MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, searchUsageHolder, clusterSupportsFeature); return channel -> { final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index a9c2ff7576b05..99c11bb60b8f0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -95,7 +95,9 @@ public Set supportedCapabilities() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } SearchRequest searchRequest = new SearchRequest(); // access the BwC param, but just drop it // this might be set by old clients diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index b87d097413b67..47d3ed337af73 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -73,6 +73,7 @@ import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; import org.elasticsearch.search.rank.feature.RankFeatureResult; import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -377,7 +378,7 @@ public void preProcess() { ); } if (rescore != null) { - if (sort != null) { + if (RescorePhase.validateSort(sort) == false) { throw new IllegalArgumentException("Cannot use [sort] option in conjunction with [rescore]."); } int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java 
b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java index beac39c2de304..553511346b182 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java +++ b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java @@ -23,4 +23,11 @@ public final class SearchFeatures implements FeatureSpecification { public Set getFeatures() { return Set.of(KnnVectorQueryBuilder.K_PARAM_SUPPORTED, LUCENE_10_0_0_UPGRADE); } + + public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled"); + + @Override + public Set getTestFeatures() { + return Set.of(RETRIEVER_RESCORER_ENABLED); + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index d282ba425b126..3294e1ba03f6b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -231,6 +231,7 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescorerBuilder; import org.elasticsearch.search.retriever.KnnRetrieverBuilder; +import org.elasticsearch.search.retriever.RescorerRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.StandardRetrieverBuilder; @@ -1080,6 +1081,7 @@ private void registerFetchSubPhase(FetchSubPhase subPhase) { private void registerRetrieverParsers(List plugins) { registerRetriever(new RetrieverSpec<>(StandardRetrieverBuilder.NAME, StandardRetrieverBuilder::fromXContent)); registerRetriever(new RetrieverSpec<>(KnnRetrieverBuilder.NAME, KnnRetrieverBuilder::fromXContent)); + registerRetriever(new RetrieverSpec<>(RescorerRetrieverBuilder.NAME, RescorerRetrieverBuilder::fromXContent)); registerFromPlugin(plugins, SearchPlugin::getRetrievers, this::registerRetriever); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index b9bd398500c71..4557ccb3d2220 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -17,6 +17,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ResolvedIndices; @@ -152,6 +154,7 @@ import java.util.function.LongSupplier; import java.util.function.Supplier; +import static org.elasticsearch.TransportVersions.ERROR_TRACE_IN_TRANSPORT_HEADER; import static org.elasticsearch.core.TimeValue.timeValueHours; import static org.elasticsearch.core.TimeValue.timeValueMillis; import static org.elasticsearch.core.TimeValue.timeValueMinutes; @@ -272,6 +275,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; + private static final StackTraceElement[] EMPTY_STACK_TRACE_ARRAY = new StackTraceElement[0]; private final ThreadPool threadPool; @@ -506,7 +510,41 @@ protected void doClose() { keepAliveReaper.cancel(); } + /** + * Wraps the listener to avoid sending StackTraces back to the 
coordinating + node if the `error_trace` header is set to {@code false}. Upon reading we + default to {@code true} to maintain the same behavior as before the change, + due to older nodes not being able to specify whether they need stack traces. + * + * @param <T> the type of the response + * @param listener the action listener to be wrapped + * @param version channel version of the request + * @param threadPool the thread pool whose context carries the {@code error_trace} header + * @return the wrapped action listener + */ + static <T> ActionListener<T> maybeWrapListenerForStackTrace( + ActionListener<T> listener, + TransportVersion version, + ThreadPool threadPool + ) { + boolean header = true; + if (version.onOrAfter(ERROR_TRACE_IN_TRANSPORT_HEADER) && threadPool.getThreadContext() != null) { + header = Boolean.parseBoolean(threadPool.getThreadContext().getHeaderOrDefault("error_trace", "false")); + } + if (header == false) { + return listener.delegateResponse((l, e) -> { + ExceptionsHelper.unwrapCausesAndSuppressed(e, err -> { + err.setStackTrace(EMPTY_STACK_TRACE_ARRAY); + return false; + }); + l.onFailure(e); + }); + } + return listener; + } + public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, ActionListener<SearchPhaseResult> listener) { + listener = maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool); final IndexShard shard = getShard(request); rewriteAndFetchShardRequest(shard, request, listener.delegateFailure((l, rewritten) -> { // fork the execution in the search thread pool @@ -544,10 +582,11 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, ActionListener<SearchPhaseResult> listener) { + ActionListener<SearchPhaseResult> finalListener = maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool); assert request.canReturnNullResponseIfMatchNoDocs() == false || request.numberOfShards() > 1 : "empty responses require more than one shard"; final IndexShard shard = getShard(request); - rewriteAndFetchShardRequest(shard, request, listener.delegateFailure((l, orig) -> { + rewriteAndFetchShardRequest(shard, request, finalListener.delegateFailure((l, orig) -> { // check if we can shortcut the query phase entirely.
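// Aside: the observable effect of maybeWrapListenerForStackTrace, as a unit-style
// sketch (listener, channelVersion and threadPool assumed in scope, channelVersion
// at or above ERROR_TRACE_IN_TRANSPORT_HEADER, no error_trace header in the context):
ActionListener<SearchPhaseResult> wrapped =
    SearchService.maybeWrapListenerForStackTrace(listener, channelVersion, threadPool);
wrapped.onFailure(new ElasticsearchException("boom", new RuntimeException("cause")));
// the delegate receives the same exception tree, but with getStackTrace().length == 0
// on the exception, its causes, and any suppressed exceptions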
if (orig.canReturnNullResponseIfMatchNoDocs()) { assert orig.scroll() == null; @@ -561,7 +600,7 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, ); CanMatchShardResponse canMatchResp = canMatch(canMatchContext, false); if (canMatchResp.canMatch() == false) { - listener.onResponse(QuerySearchResult.nullInstance()); + finalListener.onResponse(QuerySearchResult.nullInstance()); return; } } @@ -736,6 +775,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh } public void executeRankFeaturePhase(RankFeatureShardRequest request, SearchShardTask task, ActionListener listener) { + listener = maybeWrapListenerForStackTrace(listener, request.getShardSearchRequest().getChannelVersion(), threadPool); final ReaderContext readerContext = findReaderContext(request.contextId(), request); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.getShardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); @@ -779,8 +819,10 @@ private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchCon public void executeQueryPhase( InternalScrollSearchRequest request, SearchShardTask task, - ActionListener listener + ActionListener listener, + TransportVersion version ) { + listener = maybeWrapListenerForStackTrace(listener, version, threadPool); final LegacyReaderContext readerContext = (LegacyReaderContext) findReaderContext(request.contextId(), request); final Releasable markAsUsed; try { @@ -816,7 +858,13 @@ public void executeQueryPhase( * It is the responsibility of the caller to ensure that the ref count is correctly decremented * when the object is no longer needed. */ - public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ActionListener listener) { + public void executeQueryPhase( + QuerySearchRequest request, + SearchShardTask task, + ActionListener listener, + TransportVersion version + ) { + listener = maybeWrapListenerForStackTrace(listener, version, threadPool); final ReaderContext readerContext = findReaderContext(request.contextId(), request.shardSearchRequest()); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 3554a6dc08b90..8c21abe4180ea 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -48,9 +48,7 @@ import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.slice.SliceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; -import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -2341,18 +2339,6 @@ public ActionRequestValidationException validate( validationException = rescorer.validate(this, validationException); } } - - if (pointInTimeBuilder() == null && sorts() != null) { - for (var sortBuilder : sorts()) { - if (sortBuilder 
instanceof FieldSortBuilder fieldSortBuilder - && ShardDocSortField.NAME.equals(fieldSortBuilder.getFieldName())) { - validationException = addValidationError( - "[" + FieldSortBuilder.SHARD_DOC_FIELD_NAME + "] sort field cannot be used without [point in time]", - validationException - ); - } - } - } return validationException; } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index cbc04dd460ff5..3d793a164f40a 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -58,6 +58,7 @@ import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.InternalProfileCollector; import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; @@ -238,7 +239,7 @@ static CollectorManager createQueryPhaseCollectorMa int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; if (rescore) { - assert searchContext.sort() == null; + assert RescorePhase.validateSort(searchContext.sort()); for (RescoreContext rescoreContext : searchContext.rescore()) { numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 7e3646e7689cc..c23df9cdfa441 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; @@ -22,9 +23,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.search.sort.ShardDocSortField; +import org.elasticsearch.search.sort.SortAndFormats; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -39,15 +43,27 @@ public static void execute(SearchContext context) { if (context.size() == 0 || context.rescore() == null || context.rescore().isEmpty()) { return; } - + if (validateSort(context.sort()) == false) { + throw new IllegalStateException("Cannot use [sort] option in conjunction with [rescore], missing a validate?"); + } TopDocs topDocs = context.queryResult().topDocs().topDocs; if (topDocs.scoreDocs.length == 0) { return; } + // Populate FieldDoc#score using the primary sort field (_score) to ensure compatibility with top docs rescoring + Arrays.stream(topDocs.scoreDocs).forEach(t -> { + if (t instanceof FieldDoc fieldDoc) { + fieldDoc.score = (float) fieldDoc.fields[0]; + } + }); TopFieldGroups topGroups = null; + TopFieldDocs topFields = null; if (topDocs instanceof TopFieldGroups topFieldGroups) { - assert context.collapse() != null; + 
assert context.collapse() != null && validateSortFields(topFieldGroups.fields); topGroups = topFieldGroups; + } else if (topDocs instanceof TopFieldDocs topFieldDocs) { + assert validateSortFields(topFieldDocs.fields); + topFields = topFieldDocs; } try { Runnable cancellationCheck = getCancellationChecks(context); @@ -56,17 +72,18 @@ public static void execute(SearchContext context) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); // It is the responsibility of the rescorer to sort the resulted top docs, // here we only assert that this condition is met. - assert context.sort() == null && topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; + assert topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; ctx.setCancellationChecker(null); } + /** + * Since rescorers are building top docs with score only, we must reconstruct the {@link TopFieldGroups} + * or {@link TopFieldDocs} using their original version before rescoring. + */ if (topGroups != null) { assert context.collapse() != null; - /** - * Since rescorers don't preserve collapsing, we must reconstruct the group and field - * values from the originalTopGroups to create a new {@link TopFieldGroups} from the - * rescored top documents. - */ - topDocs = rewriteTopGroups(topGroups, topDocs); + topDocs = rewriteTopFieldGroups(topGroups, topDocs); + } else if (topFields != null) { + topDocs = rewriteTopFieldDocs(topFields, topDocs); } context.queryResult() .topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), context.queryResult().sortValueFormats()); @@ -81,29 +98,84 @@ public static void execute(SearchContext context) { } } - private static TopFieldGroups rewriteTopGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { - assert originalTopGroups.fields.length == 1 && SortField.FIELD_SCORE.equals(originalTopGroups.fields[0]) - : "rescore must always sort by score descending"; + /** + * Returns whether the provided {@link SortAndFormats} can be used to rescore + * top documents. + */ + public static boolean validateSort(SortAndFormats sortAndFormats) { + if (sortAndFormats == null) { + return true; + } + return validateSortFields(sortAndFormats.sort.getSort()); + } + + private static boolean validateSortFields(SortField[] fields) { + if (fields[0].equals(SortField.FIELD_SCORE) == false) { + return false; + } + if (fields.length == 1) { + return true; + } + + // The ShardDocSortField can be used as a tiebreaker because it maintains + // the natural document ID order within the shard. 
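// Aside: sort shapes the new validation accepts or rejects, illustratively
// (assuming ShardDocSortField(false) constructs the ascending shard-doc tiebreaker):
Sort scoreOnly = new Sort(SortField.FIELD_SCORE);                                       // accepted
Sort scoreThenShardDoc = new Sort(SortField.FIELD_SCORE, new ShardDocSortField(false)); // accepted
Sort fieldFirst = new Sort(new SortField("price", SortField.Type.LONG));                // rejected: _score must lead
Sort reverseTiebreak = new Sort(SortField.FIELD_SCORE, new ShardDocSortField(true));    // rejected: tiebreaker must be ascending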
+ if (fields[1] instanceof ShardDocSortField == false || fields[1].getReverse()) { + return false; + } + return true; + } + + private static TopFieldDocs rewriteTopFieldDocs(TopFieldDocs originalTopFieldDocs, TopDocs rescoredTopDocs) { + Map docIdToFieldDoc = Maps.newMapWithExpectedSize(originalTopFieldDocs.scoreDocs.length); + for (int i = 0; i < originalTopFieldDocs.scoreDocs.length; i++) { + docIdToFieldDoc.put(originalTopFieldDocs.scoreDocs[i].doc, (FieldDoc) originalTopFieldDocs.scoreDocs[i]); + } + var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; + int pos = 0; + for (var doc : rescoredTopDocs.scoreDocs) { + newScoreDocs[pos] = docIdToFieldDoc.get(doc.doc); + newScoreDocs[pos].score = doc.score; + newScoreDocs[pos].fields[0] = newScoreDocs[pos].score; + pos++; + } + return new TopFieldDocs(originalTopFieldDocs.totalHits, newScoreDocs, originalTopFieldDocs.fields); + } + + private static TopFieldGroups rewriteTopFieldGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { + var newFieldDocs = rewriteFieldDocs((FieldDoc[]) originalTopGroups.scoreDocs, rescoredTopDocs.scoreDocs); + Map docIdToGroupValue = Maps.newMapWithExpectedSize(originalTopGroups.scoreDocs.length); for (int i = 0; i < originalTopGroups.scoreDocs.length; i++) { docIdToGroupValue.put(originalTopGroups.scoreDocs[i].doc, originalTopGroups.groupValues[i]); } - var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; var newGroupValues = new Object[originalTopGroups.groupValues.length]; int pos = 0; for (var doc : rescoredTopDocs.scoreDocs) { - newScoreDocs[pos] = new FieldDoc(doc.doc, doc.score, new Object[] { doc.score }); newGroupValues[pos++] = docIdToGroupValue.get(doc.doc); } return new TopFieldGroups( originalTopGroups.field, originalTopGroups.totalHits, - newScoreDocs, + newFieldDocs, originalTopGroups.fields, newGroupValues ); } + private static FieldDoc[] rewriteFieldDocs(FieldDoc[] originalTopDocs, ScoreDoc[] rescoredTopDocs) { + Map docIdToFieldDoc = Maps.newMapWithExpectedSize(rescoredTopDocs.length); + Arrays.stream(originalTopDocs).forEach(d -> docIdToFieldDoc.put(d.doc, d)); + var newDocs = new FieldDoc[rescoredTopDocs.length]; + int pos = 0; + for (var doc : rescoredTopDocs) { + newDocs[pos] = docIdToFieldDoc.get(doc.doc); + newDocs[pos].score = doc.score; + newDocs[pos].fields[0] = doc.score; + pos++; + } + return newDocs; + } + /** * Returns true if the provided docs are sorted by score. */ diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index f624961515389..38a319321207f 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -39,7 +39,7 @@ public abstract class RescorerBuilder> protected Integer windowSize; - private static final ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); + public static final ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); /** * Construct an empty RescoreBuilder. 
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index 2ab6395db73b5..298340e5c579e 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -32,10 +32,12 @@ import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.xcontent.ParseField; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -49,6 +51,8 @@ public abstract class CompoundRetrieverBuilder rankWindowSize) { validationException = addValidationError( - "[" - + this.getName() - + "] requires [rank_window_size: " - + rankWindowSize - + "]" - + " be greater than or equal to [size: " - + source.size() - + "]", + String.format( + Locale.ROOT, + "[%s] requires [%s: %d] be greater than or equal to [size: %d]", + getName(), + getRankWindowSizeField().getPreferredName(), + rankWindowSize, + source.size() + ), validationException ); } @@ -231,6 +243,21 @@ public ActionRequestValidationException validate( } for (RetrieverSource innerRetriever : innerRetrievers) { validationException = innerRetriever.retriever().validate(source, validationException, isScroll, allowPartialSearchResults); + if (innerRetriever.retriever() instanceof CompoundRetrieverBuilder compoundChild) { + if (rankWindowSize > compoundChild.rankWindowSize) { + String errorMessage = String.format( + Locale.ROOT, + "[%s] requires [%s: %d] to be smaller than or equal to its sub retriever's %s [%s: %d]", + this.getName(), + getRankWindowSizeField().getPreferredName(), + rankWindowSize, + compoundChild.getName(), + compoundChild.getRankWindowSizeField(), + compoundChild.rankWindowSize + ); + validationException = addValidationError(errorMessage, validationException); + } + } } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java new file mode 100644 index 0000000000000..09688b5b9b001 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilder.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.search.builder.SearchSourceBuilder.RESCORE_FIELD; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A {@link CompoundRetrieverBuilder} that re-scores only the results produced by its child retriever. + */ +public final class RescorerRetrieverBuilder extends CompoundRetrieverBuilder { + + public static final String NAME = "rescorer"; + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + args -> new RescorerRetrieverBuilder((RetrieverBuilder) args[0], (List>) args[1]) + ); + + static { + PARSER.declareNamedObject(constructorArg(), (parser, context, n) -> { + RetrieverBuilder innerRetriever = parser.namedObject(RetrieverBuilder.class, n, context); + context.trackRetrieverUsage(innerRetriever.getName()); + return innerRetriever; + }, RETRIEVER_FIELD); + PARSER.declareField(constructorArg(), (parser, context) -> { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List> rescorers = new ArrayList<>(); + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + rescorers.add(RescorerBuilder.parseFromXContent(parser, name -> context.trackRescorerUsage(name))); + } + return rescorers; + } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + return List.of(RescorerBuilder.parseFromXContent(parser, name -> context.trackRescorerUsage(name))); + } else { + throw new IllegalArgumentException( + "Unknown format for [rescorer.rescore], expects an object or an array of objects, got: " + parser.currentToken() + ); + } + }, RESCORE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); + RetrieverBuilder.declareBaseParserFields(NAME, PARSER); + } + + public static RescorerRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + try { + return PARSER.apply(parser, context); + } catch (Exception e) { + throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); + } + } + + private final List> rescorers; + + public RescorerRetrieverBuilder(RetrieverBuilder retriever, List> rescorers) { + super(List.of(new RetrieverSource(retriever, null)), extractMinWindowSize(rescorers)); + if (rescorers.isEmpty()) { + throw new IllegalArgumentException("Missing rescore definition"); + } + this.rescorers = rescorers; + } + + private RescorerRetrieverBuilder(RetrieverSource retriever, List> rescorers) { + super(List.of(retriever), extractMinWindowSize(rescorers)); + this.rescorers = rescorers; + } + + /** + * The minimum window size is used as the {@link CompoundRetrieverBuilder#rankWindowSize}, + * the final number of top documents to return in this retriever. 
+ */ + private static int extractMinWindowSize(List> rescorers) { + int windowSize = Integer.MAX_VALUE; + for (var rescore : rescorers) { + windowSize = Math.min(rescore.windowSize() == null ? RescorerBuilder.DEFAULT_WINDOW_SIZE : rescore.windowSize(), windowSize); + } + return windowSize; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public ParseField getRankWindowSizeField() { + return RescorerBuilder.WINDOW_SIZE_FIELD; + } + + @Override + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder source) { + /** + * The re-scorer is passed downstream because this query operates only on + * the top documents retrieved by the child retriever. + * + * - If the sub-retriever is a {@link CompoundRetrieverBuilder}, only the top + * documents are re-scored since they are already determined at this stage. + * - For other retrievers that do not require a rewrite, the re-scorer's window + * size is applied per shard. As a result, more documents are re-scored + * compared to the final top documents produced by these retrievers in isolation. + */ + for (var rescorer : rescorers) { + source.addRescorer(rescorer); + } + return source; + } + + @Override + public void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName(), innerRetrievers.getFirst().retriever()); + builder.startArray(RESCORE_FIELD.getPreferredName()); + for (RescorerBuilder rescorer : rescorers) { + rescorer.toXContent(builder, params); + } + builder.endArray(); + } + + @Override + protected RescorerRetrieverBuilder clone(List newChildRetrievers, List newPreFilterQueryBuilders) { + var newInstance = new RescorerRetrieverBuilder(newChildRetrievers.get(0), rescorers); + newInstance.preFilterQueryBuilders = newPreFilterQueryBuilders; + return newInstance; + } + + @Override + protected RankDoc[] combineInnerRetrieverResults(List rankResults) { + assert rankResults.size() == 1; + ScoreDoc[] scoreDocs = rankResults.getFirst(); + RankDoc[] rankDocs = new RankDoc[scoreDocs.length]; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + rankDocs[i] = new RankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex); + rankDocs[i].rank = i + 1; + } + return rankDocs; + } + + @Override + public boolean doEquals(Object o) { + RescorerRetrieverBuilder that = (RescorerRetrieverBuilder) o; + return super.doEquals(o) && Objects.equals(rescorers, that.rescorers); + } + + @Override + public int doHashCode() { + return Objects.hash(super.doHashCode(), rescorers); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index d52c354cad69e..b9bfdfdf3402f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -63,7 +63,7 @@ protected static void declareBaseParserFields( AbstractObjectParser parser ) { parser.declareObjectArray( - (r, v) -> r.preFilterQueryBuilders = v, + (r, v) -> r.preFilterQueryBuilders = new ArrayList<>(v), (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p, c::trackQueryUsage), PRE_FILTER_FIELD ); diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 040a7a6205f9c..462fab651c211 100644 --- 
a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -88,6 +88,7 @@ grant codeBase "${codebase.elasticsearch}" { // this is the test-framework, but the jar is horribly named grant codeBase "${codebase.framework}" { permission java.lang.RuntimePermission "setSecurityManager"; + permission java.lang.RuntimePermission "createClassLoader"; }; grant codeBase "${codebase.elasticsearch-rest-client}" { @@ -129,4 +130,5 @@ grant { permission java.nio.file.LinkPermission "symbolic"; // needed for keystore tests permission java.lang.RuntimePermission "accessUserInformation"; + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 3b122864aa472..6ee86470861b4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -97,7 +97,7 @@ public void testCustomSimilarity() { .put("index.similarity.my_similarity.after_effect", "l") .build() ); - service.verifyIndexMetadata(src, IndexVersions.MINIMUM_COMPATIBLE); + service.verifyIndexMetadata(src, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public void testIncompatibleVersion() { diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 441ad8a5a225a..7221d69b74d46 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -160,6 +160,20 @@ public void testSortingAgainstAliases() { assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } + public void testSortingAgainstAliasesPre713() { + IndexSettings indexSettings = indexSettings( + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_7_12_0).put("index.sort.field", "field").build() + ); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); + assertThat(sort.getSort(), arrayWithSize(1)); + assertThat(sort.getSort()[0].getField(), equalTo("aliased")); + assertWarnings( + "Index sort for index [test] defined on field [field] which resolves to field [aliased]. 
" + + "You will not be able to define an index sort over aliased fields in new indexes" + ); + } + public void testTimeSeriesMode() { IndexSettings indexSettings = indexSettings( Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index f5b86f422915e..7f3399cb24a15 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -55,6 +55,10 @@ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()), is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_COMPATIBLE)) ); + assertThat( + PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersion.current()), + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(IndexVersions.MINIMUM_READONLY_COMPATIBLE)) + ); } public void testThatInstancesAreCachedAndReused() { diff --git a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java index 74d6e83aff266..b8600842effe4 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java @@ -44,7 +44,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RecoverySourcePruneMergePolicyTests extends ESTestCase { @@ -191,7 +191,7 @@ public void testPruneSome() throws IOException { } assertEquals(i, extra_source.docID()); if (syntheticRecoverySource) { - assertThat(extra_source.longValue(), greaterThan(10L)); + assertThat(extra_source.longValue(), greaterThanOrEqualTo(10L)); } else { assertThat(extra_source.longValue(), equalTo(1L)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index e0f58b8922be2..b87ab09c530d6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -322,7 +322,11 @@ public void testBlankFieldName() throws Exception { } public void testBlankFieldNameBefore8_6_0() throws Exception { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_5_0); + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.V_8_5_0 + ); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), TransportVersions.MINIMUM_COMPATIBLE, diff --git a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java index 8f45a15c73bb6..1f82d7998257e 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java @@ -147,7 +147,7 @@ protected void syncRetentionLeases(ShardId id, RetentionLeases leases, 
ActionLis public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception { final Settings.Builder settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleVersion(random())); + settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleWriteVersion(random())); } try (ReplicationGroup group = createGroup(between(1, 2), settings.build())) { group.startAll(); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index f6d7b3d1d65f3..ecb942492af53 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -74,7 +74,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, negativeScoresSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -99,7 +99,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -124,7 +124,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_READONLY_COMPATIBLE, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index ab65d56557ad9..0e333491588a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.indices; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; @@ -98,8 +97,6 @@ public Map getMetadataMappers() { DataStreamTimestampFieldMapper.NAME, FieldNamesFieldMapper.NAME }; - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) - @AwaitsFix(bugUrl = "test is referencing 7.x index versions so needs to be updated for 9.0 bump") public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { @@ -239,14 +236,14 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - IndexVersion version = 
IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomVersion(); List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomVersion(); List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index b85ad31288c8c..bd810cea216fc 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -51,6 +51,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.ElasticsearchExceptionTests.assertDeepEquals; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -180,7 +181,7 @@ public void testStackTrace() throws IOException { } else { assertThat(response.status(), is(RestStatus.BAD_REQUEST)); } - boolean traceExists = request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled(); + boolean traceExists = request.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && channel.detailedErrorsEnabled(); if (traceExists) { assertThat(response.content().utf8ToString(), containsString(ElasticsearchException.STACK_TRACE)); } else { diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java index dcaa64ede9e89..48d09c75cb2d1 100644 --- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java @@ -51,12 +51,12 @@ public void testFloatVectorClassBindings() throws IOException { BinaryDenseVectorScriptDocValuesTests.wrap( new float[][] { docVector }, ElementType.FLOAT, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), "test", ElementType.FLOAT, dims, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), new BinaryDenseVectorDocValuesField( BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), @@ -303,12 +303,12 @@ public void testByteVsFloatSimilarity() throws IOException { BinaryDenseVectorScriptDocValuesTests.wrap( new float[][] { docVector }, ElementType.FLOAT, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), "field0", ElementType.FLOAT, dims, - IndexVersions.MINIMUM_COMPATIBLE + IndexVersions.MINIMUM_READONLY_COMPATIBLE ), new BinaryDenseVectorDocValuesField( BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, 
ElementType.FLOAT, IndexVersion.current()), diff --git a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java index 8a5298777ede0..63d502a248aa8 100644 --- a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java +++ b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java @@ -69,7 +69,11 @@ public void testFloatVsListQueryVector() { assertEquals(knn.cosineSimilarity(arrayQV), knn.cosineSimilarity(listQV), 0.001f); assertEquals(knn.cosineSimilarity((Object) listQV), knn.cosineSimilarity((Object) arrayQV), 0.001f); - for (IndexVersion indexVersion : List.of(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current())) { + for (IndexVersion indexVersion : List.of( + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersion.current() + )) { BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(docVector, ElementType.FLOAT, indexVersion); BinaryDenseVector bdv = new BinaryDenseVector(docVector, value, dims, indexVersion); diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index a474c1dc38c50..d3a3792f605db 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.BaseDirectoryWrapper; @@ -245,7 +246,10 @@ protected Engine.Searcher acquireSearcherInternal(String source) { // resultWindow not greater than maxResultWindow and both rescore and sort are not null context1.from(0); DocValueFormat docValueFormat = mock(DocValueFormat.class); - SortAndFormats sortAndFormats = new SortAndFormats(new Sort(), new DocValueFormat[] { docValueFormat }); + SortAndFormats sortAndFormats = new SortAndFormats( + new Sort(new SortField[] { SortField.FIELD_DOC }), + new DocValueFormat[] { docValueFormat } + ); context1.sort(sortAndFormats); RescoreContext rescoreContext = mock(RescoreContext.class); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java index 02593e41f5d84..0fc1694d39926 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java @@ -2684,7 +2684,8 @@ public void testDfsQueryPhaseRewrite() { service.executeQueryPhase( new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), new SearchShardTask(42L, "", "", "", null, emptyMap()), - plainActionFuture + plainActionFuture, + TransportVersion.current() ); plainActionFuture.actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 31bcab31ca8a7..d041121b8a96b 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -13,6 +13,8 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -53,9 +55,14 @@
 import java.io.IOException;
 import java.util.Collections;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiFunction;
 import java.util.function.Predicate;
 
+import static org.elasticsearch.search.SearchService.maybeWrapListenerForStackTrace;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.not;
+
 public class SearchServiceTests extends IndexShardTestCase {
 
     public void testCanMatchMatchAll() throws IOException {
@@ -117,6 +124,33 @@ public Type getType() {
         doTestCanMatch(searchRequest, sortField, true, null, false);
     }
 
+    public void testMaybeWrapListenerForStackTrace() {
+        // Tests that the same listener keeps its stack trace if it is not wrapped, and loses it if it is wrapped.
+        AtomicBoolean isWrapped = new AtomicBoolean(false);
+        ActionListener<SearchPhaseResult> listener = new ActionListener<>() {
+            @Override
+            public void onResponse(SearchPhaseResult searchPhaseResult) {
+                // noop - we only care about failure scenarios
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                if (isWrapped.get()) {
+                    assertThat(e.getStackTrace().length, is(0));
+                } else {
+                    assertThat(e.getStackTrace().length, is(not(0)));
+                }
+            }
+        };
+        Exception e = new Exception();
+        e.fillInStackTrace();
+        assertThat(e.getStackTrace().length, is(not(0)));
+        listener.onFailure(e);
+        listener = maybeWrapListenerForStackTrace(listener, TransportVersion.current(), threadPool);
+        isWrapped.set(true);
+        listener.onFailure(e);
+    }
+
     private void doTestCanMatch(
         SearchRequest searchRequest,
         SortField sortField,
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java
new file mode 100644
index 0000000000000..fa83246d90cb2
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/retriever/RescorerRetrieverBuilderParsingTests.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.search.retriever;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.rescore.QueryRescorerBuilderTests;
+import org.elasticsearch.search.rescore.RescorerBuilder;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.usage.SearchUsage;
+import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xcontent.XContentParser;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.util.Collections.emptyList;
+
+public class RescorerRetrieverBuilderParsingTests extends AbstractXContentTestCase<RescorerRetrieverBuilder> {
+
+    private static List<NamedXContentRegistry.Entry> xContentRegistryEntries;
+
+    @BeforeClass
+    public static void init() {
+        xContentRegistryEntries = new SearchModule(Settings.EMPTY, emptyList()).getNamedXContents();
+    }
+
+    @AfterClass
+    public static void afterClass() throws Exception {
+        xContentRegistryEntries = null;
+    }
+
+    @Override
+    protected RescorerRetrieverBuilder createTestInstance() {
+        int num = randomIntBetween(1, 3);
+        List<RescorerBuilder<?>> rescorers = new ArrayList<>();
+        for (int i = 0; i < num; i++) {
+            rescorers.add(QueryRescorerBuilderTests.randomRescoreBuilder());
+        }
+        return new RescorerRetrieverBuilder(TestRetrieverBuilder.createRandomTestRetrieverBuilder(), rescorers);
+    }
+
+    @Override
+    protected RescorerRetrieverBuilder doParseInstance(XContentParser parser) throws IOException {
+        return (RescorerRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder(
+            parser,
+            new RetrieverParserContext(new SearchUsage(), n -> true)
+        );
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(xContentRegistryEntries);
+        entries.add(
+            new NamedXContentRegistry.Entry(
+                RetrieverBuilder.class,
+                TestRetrieverBuilder.TEST_SPEC.getName(),
+                (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c),
+                TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion()
+            )
+        );
+        return new NamedXContentRegistry(entries);
+    }
+}
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 9dc0263f49aee..8296bc14fd665 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -10,12 +10,10 @@
 apply plugin: 'elasticsearch.java'
 apply plugin: 'com.gradleup.shadow'
 
+
 import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar
 
 configurations {
-//  all {
-//    transitive = true
-//  }
   hdfs2
   hdfs3
   consumable("shadowedHdfs2")
@@ -27,20 +25,76 @@ dependencies {
     transitive false
   }
   compileOnly "junit:junit:${versions.junit}"
-  hdfs2 "org.apache.hadoop:hadoop-minicluster:2.8.5"
-  hdfs3 "org.apache.hadoop:hadoop-minicluster:3.3.1"
+  def commonExcludes = [
+    [group: "org.apache.commons", module: "commons-compress"],
+    [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-app"],
+    [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-core"],
+    [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-hs"],
+    [group: "org.apache.hadoop", module: "hadoop-mapreduce-client-jobclient"],
+    [group: "org.apache.hadoop", module: "hadoop-yarn-server-tests"],
+    [group: "org.apache.httpcomponents", module: "httpclient"],
+    [group: "org.apache.zookeeper", module: "zookeeper"],
+    [group: "org.apache.curator", module: 
"curator-recipes"], + [group: "org.apache.curator", module: "curator-client"], + [group: "org.apache.curator", module: "curator-framework"], + [group: "org.apache.avro", module: "avro"], + [group: "log4j", module: "log4j"], + [group: "io.netty", module: "netty-all"], + [group: "io.netty", module: "netty"], + [group: "com.squareup.okhttp", module: "okhttp"], + [group: "com.google.guava", module: "guava"], + [group: "com.google.code.gson", module: "gson"], + [group: "javax.servlet.jsp", module: "jsp-api"], + [group: "org.fusesource.leveldbjni", module: "leveldbjni-all"], + [group: "commons-cli", module: "commons-cli"], + [group: "org.mortbay.jetty", module: "servlet-api"], + [group: "commons-logging", module: "commons-logging"], + [group: "org.slf4j", module: "slf4j-log4j12"], + [group: "commons-codec", module: "commons-codec"], + [group: "com.sun.jersey", module: "jersey-core"], + [group: "com.sun.jersey", module: "jersey-json"], + [group: "com.google.code.findbugs", module: "jsr305"], + [group: "com.sun.jersey", module: "jersey-json"], + [group: "com.nimbusds", module: "nimbus-jose-jwt"], + [group: "com.jcraft", module: "jsch"], + [group: "org.slf4j", module: "slf4j-api"], + ] + + hdfs2("org.apache.hadoop:hadoop-minicluster:2.8.5") { + commonExcludes.each { exclude it } + exclude group: "org.apache.commons", module: "commons-math3" + exclude group: "xmlenc", module: "xmlenc" + exclude group: "net.java.dev.jets3t", module: "jets3t" + exclude group: "org.apache.directory.server", module: "apacheds-i18n" + exclude group: "xerces", module: "xercesImpl" + } + + hdfs3("org.apache.hadoop:hadoop-minicluster:3.3.1") { + commonExcludes.each { exclude it } + exclude group: "dnsjava", module: "dnsjava" + exclude group: "com.google.inject.extensions", module: "guice-servlet" + exclude group: "com.google.inject", module: "guice" + exclude group: "com.microsoft.sqlserver", module: "mssql-jdbc" + exclude group: "com.sun.jersey.contribs", module: "jersey-guice" + exclude group: "com.zaxxer", module: "HikariCP-java7" + exclude group: "com.sun.jersey", module: "jersey-server" + exclude group: "org.bouncycastle", module: "bcpkix-jdk15on" + exclude group: "org.bouncycastle", module: "bcprov-jdk15on" + exclude group: "org.ehcache", module: "ehcache" + exclude group: "org.apache.geronimo.specs", module: "geronimo-jcache_1.0_spec" + exclude group: "org.xerial.snappy", module: "snappy-java" + } } tasks.named("shadowJar").configure { archiveClassifier.set("hdfs3") // fix issues with signed jars - relocate("org.apache.hadoop", "fixture.hdfs3.org.apache.hadoop") { exclude "org.apache.hadoop.hdfs.protocol.ClientProtocol" exclude "org.apache.hadoop.ipc.StandbyException" } - configurations << project.configurations.hdfs3 + configurations.add(project.configurations.hdfs3) } def hdfs2Jar = tasks.register("hdfs2jar", ShadowJar) { @@ -50,26 +104,15 @@ def hdfs2Jar = tasks.register("hdfs2jar", ShadowJar) { } archiveClassifier.set("hdfs2") from sourceSets.main.output - configurations << project.configurations.hdfs2 + configurations.add(project.configurations.hdfs2) } tasks.withType(ShadowJar).configureEach { dependencies { -// exclude(dependency('commons-io:commons-io:2.8.0')) exclude(dependency("com.carrotsearch.randomizedtesting:randomizedtesting-runner:.*")) exclude(dependency("junit:junit:.*")) - exclude(dependency("org.slf4j:slf4j-api:.*")) - exclude(dependency("com.google.guava:guava:.*")) - exclude(dependency("org.apache.commons:commons-compress:.*")) - exclude(dependency("commons-logging:commons-logging:.*")) - 
exclude(dependency("commons-codec:commons-codec:.*")) - exclude(dependency("org.apache.httpcomponents:httpclient:.*")) exclude(dependency("org.apache.httpcomponents:httpcore:.*")) exclude(dependency("org.apache.logging.log4j:log4j-1.2-api:.*")) - exclude(dependency("log4j:log4j:.*")) - exclude(dependency("io.netty:.*:.*")) - exclude(dependency("com.nimbusds:nimbus-jose-jwt:.*")) - exclude(dependency("commons-cli:commons-cli:1.2")) exclude(dependency("net.java.dev.jna:jna:.*")) exclude(dependency("org.objenesis:objenesis:.*")) exclude(dependency('com.fasterxml.jackson.core:.*:.*')) diff --git a/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java b/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java index d13450fbb52dd..2033743354f34 100644 --- a/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java +++ b/test/framework/src/integTest/java/org/elasticsearch/test/test/InternalClusterForbiddenSettingIT.java @@ -26,7 +26,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testRestart() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleWriteVersion(random(), IndexVersion.current()); // create / delete an index with forbidden setting prepareCreate("test").setSettings(settings(version).build()).get(); indicesAdmin().prepareDelete("test").get(); @@ -38,7 +38,7 @@ public void testRestart() throws Exception { } public void testRollingRestart() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.current()); + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleWriteVersion(random(), IndexVersion.current()); // create / delete an index with forbidden setting prepareCreate("test").setSettings(settings(version).build()).get(); indicesAdmin().prepareDelete("test").get(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java index 5cdb3f1808a38..4f559a5f3eaef 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java +++ b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java @@ -19,6 +19,7 @@ public class KnownIndexVersions { * A sorted list of all known index versions */ public static final List ALL_VERSIONS = List.copyOf(IndexVersions.getAllVersions()); + /** * A sorted list of all known index versions that can be written to */ diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java index 449ecc099412f..580eb6eacb27e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; @@ -142,4 +143,88 @@ public final void 
testFixedMetaFieldsAreNotConfigurable() throws IOException {
         );
         assertEquals("Failed to parse mapping: " + fieldName() + " is not configurable", exception.getMessage());
     }
+
+    public void testTypeAndFriendsAreAcceptedBefore_8_6_0() throws IOException {
+        assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable());
+        IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_6_0);
+        // we randomly also pick read-only versions to test that we can still parse the parameters for them
+        IndexVersion version = IndexVersionUtils.randomVersionBetween(
+            random(),
+            IndexVersionUtils.getLowestReadCompatibleVersion(),
+            previousVersion
+        );
+        assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version));
+        MapperService mapperService = createMapperService(version, mapping(b -> {}));
+        // these parameters were previously silently ignored, they will still be ignored in existing indices
+        String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" };
+        for (String param : unsupportedParameters) {
+            String mappingAsString = "{\n"
+                + "  \"_doc\" : {\n"
+                + "    \""
+                + fieldName()
+                + "\" : {\n"
+                + "      \""
+                + param
+                + "\" : \"any\"\n"
+                + "    }\n"
+                + "  }\n"
+                + "}";
+            assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString)));
+        }
+    }
+
+    public void testTypeAndFriendsAreDeprecatedFrom_8_6_0_TO_9_0_0() throws IOException {
+        assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable());
+        IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0);
+        IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_6_0, previousVersion);
+        assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version));
+        MapperService mapperService = createMapperService(version, mapping(b -> {}));
+        // these parameters are deprecated in these index versions: they are still accepted, but emit a deprecation warning
+        String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" };
+        for (String param : unsupportedParameters) {
+            String mappingAsString = "{\n"
+                + "  \"_doc\" : {\n"
+                + "    \""
+                + fieldName()
+                + "\" : {\n"
+                + "      \""
+                + param
+                + "\" : \"any\"\n"
+                + "    }\n"
+                + "  }\n"
+                + "}";
+            assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString)));
+            assertWarnings("Parameter [" + param + "] has no effect on metadata field [" + fieldName() + "] and will be removed in future");
+        }
+    }
+ " }\n" + + " }\n" + + "}"; + expectThrows( + MapperParsingException.class, + () -> mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString)) + ); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 20cb66affddee..d239c6453a7fe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -241,6 +241,10 @@ protected static IndexSettings indexSettings() { return serviceHolder.idxSettings; } + protected static MapperService mapperService() { + return serviceHolder.mapperService; + } + protected static String expectedFieldName(String builderFieldName) { return ALIAS_TO_CONCRETE_FIELD_NAME.getOrDefault(builderFieldName, builderFieldName); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java index 592cffac33552..667149e4bdd3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java @@ -122,11 +122,21 @@ public static IndexVersion getNextVersion(IndexVersion version) { /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#current()} */ public static IndexVersion randomCompatibleVersion(Random random) { + return randomVersionBetween(random, IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()); + } + + /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#current()} and can be written to */ + public static IndexVersion randomCompatibleWriteVersion(Random random) { return randomVersionBetween(random, IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); } /** Returns a random {@code IndexVersion} that is compatible with the previous version to {@code version} */ public static IndexVersion randomPreviousCompatibleVersion(Random random, IndexVersion version) { + return randomVersionBetween(random, IndexVersions.MINIMUM_READONLY_COMPATIBLE, getPreviousVersion(version)); + } + + /** Returns a random {@code IndexVersion} that is compatible with the previous version to {@code version} and can be written to */ + public static IndexVersion randomPreviousCompatibleWriteVersion(Random random, IndexVersion version) { return randomVersionBetween(random, IndexVersions.MINIMUM_COMPATIBLE, getPreviousVersion(version)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java index e5bdd66e949f7..0da392cb7fb01 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java @@ -9,13 +9,24 @@ package org.elasticsearch.test.jar; +import org.elasticsearch.test.PrivilegedOperations.ClosableURLClassLoader; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.OutputStream; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java
index e5bdd66e949f7..0da392cb7fb01 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/jar/JarUtils.java
@@ -9,13 +9,24 @@
 
 package org.elasticsearch.test.jar;
 
+import org.elasticsearch.test.PrivilegedOperations.ClosableURLClassLoader;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.lang.module.Configuration;
+import java.lang.module.ModuleFinder;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardOpenOption;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
@@ -85,6 +96,28 @@ public static void createJarWithEntriesUTF(Path jarfile, Map<String, String> ent
         createJarWithEntries(jarfile, map);
     }
 
+    /**
+     * Creates a class loader for the given jar file.
+     * @param path Path to the jar file to load
+     * @return A URLClassLoader that will load classes from the jar. It should be closed when no longer needed.
+     */
+    public static ClosableURLClassLoader loadJar(Path path) {
+        try {
+            URL[] urls = new URL[] { path.toUri().toURL() };
+            return new ClosableURLClassLoader(URLClassLoader.newInstance(urls, JarUtils.class.getClassLoader()));
+        } catch (MalformedURLException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public static ModuleLayer.Controller loadModule(Path path, ClassLoader loader, String name) {
+        var finder = ModuleFinder.of(path.getParent());
+        var cf = Configuration.resolveAndBind(finder, List.of(ModuleLayer.boot().configuration()), ModuleFinder.of(), Set.of(name));
+        return AccessController.doPrivileged(
+            (PrivilegedAction<ModuleLayer.Controller>) () -> ModuleLayer.defineModulesWithOneLoader(cf, List.of(ModuleLayer.boot()), loader)
+        );
+    }
+
     @FunctionalInterface
     interface UncheckedIOFunction<T, R> {
         R apply(T t) throws IOException;
     }
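A hypothetical caller of the two new helpers, to show how they compose (a sketch only: the jar path, module name, class name, and the classLoader() accessor on ClosableURLClassLoader are assumptions for illustration; error handling is elided and an enclosing method that throws Exception is assumed):

    // Load a jar as a named module in its own layer, then release the loader.
    Path jar = tmpDir.resolve("my-module.jar"); // assumed to already contain a module "my.module"
    try (var closeable = JarUtils.loadJar(jar)) {
        ModuleLayer.Controller controller = JarUtils.loadModule(jar, closeable.classLoader(), "my.module");
        Class<?> clazz = controller.layer().findLoader("my.module").loadClass("my.module.SomeClass");
    }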
diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java
new file mode 100644
index 0000000000000..39a6fa1e4b34f
--- /dev/null
+++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java
@@ -0,0 +1,222 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.search;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.transport.TransportMessageListener;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xcontent.XContentType;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class AsyncSearchErrorTraceIT extends ESIntegTestCase {
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(AsyncSearch.class);
+    }
+
+    private AtomicBoolean transportMessageHasStackTrace;
+
+    @Before
+    private void setupMessageListener() {
+        internalCluster().getDataNodeInstances(TransportService.class).forEach(ts -> {
+            ts.addMessageListener(new TransportMessageListener() {
+                @Override
+                public void onResponseSent(long requestId, String action, Exception error) {
+                    TransportMessageListener.super.onResponseSent(requestId, action, error);
+                    if (action.startsWith("indices:data/read/search")) {
+                        Optional<Throwable> throwable = ExceptionsHelper.unwrapCausesAndSuppressed(
+                            error,
+                            t -> t.getStackTrace().length > 0
+                        );
+                        transportMessageHasStackTrace.set(throwable.isPresent());
+                    }
+                }
+            });
+        });
+    }
+
+    private void setupIndexWithDocs() {
+        createIndex("test1", "test2");
+        indexRandom(
+            true,
+            prepareIndex("test1").setId("1").setSource("field", "foo"),
+            prepareIndex("test2").setId("10").setSource("field", 5)
+        );
+        refresh();
+    }
+
+    public void testAsyncSearchFailingQueryErrorTraceDefault() throws IOException, InterruptedException {
+        transportMessageHasStackTrace = new AtomicBoolean();
+        setupIndexWithDocs();
+
+        Request searchRequest = new Request("POST", "/_async_search");
+        searchRequest.setJsonEntity("""
+            {
+                "query": {
+                    "simple_query_string" : {
+                        "query": "foo",
+                        "fields": ["field"]
+                    }
+                }
+            }
+            """);
+        searchRequest.addParameter("keep_on_completion", "true");
+        searchRequest.addParameter("wait_for_completion_timeout", "0ms");
+        Map<String, Object> responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO);
+        String asyncExecutionId = (String) responseEntity.get("id");
+        Request request = new Request("GET", "/_async_search/" + asyncExecutionId);
+        while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) {
+            responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L));
+        }
+        // check that the stack trace was not sent from the data node to the coordinating node
+        assertFalse(transportMessageHasStackTrace.get());
+    }
+
+    public void testAsyncSearchFailingQueryErrorTraceTrue() throws IOException, InterruptedException {
+        transportMessageHasStackTrace = new AtomicBoolean();
+        setupIndexWithDocs();
+
+        Request searchRequest = new Request("POST", "/_async_search");
+        searchRequest.setJsonEntity("""
+            {
+                "query": {
+                    "simple_query_string" : {
+                        "query": "foo",
+                        "fields": ["field"]
+                    }
+                }
+            }
+            """);
+        searchRequest.addParameter("error_trace", "true");
+        searchRequest.addParameter("keep_on_completion", "true");
searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "true"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was sent from the data node to the coordinating node + assertTrue(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceFalse() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "false"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was not sent from the data node to the coordinating node + assertFalse(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceFalseOnSubmitAndTrueOnGet() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "true"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was not sent from the data node to the coordinating node + assertFalse(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceTrueOnSubmitAndFalseOnGet() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + 
searchRequest.addParameter("error_trace", "true"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "false"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was sent from the data node to the coordinating node + assertTrue(transportMessageHasStackTrace.get()); + } + + private Map performRequestAndGetResponseEntityAfterDelay(Request r, TimeValue sleep) throws IOException, + InterruptedException { + Thread.sleep(sleep.millis()); + Response response = getRestClient().performRequest(r); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + return XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(), false); + } +} diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java index bd09d8f7740a1..952febd46c34c 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java @@ -55,6 +55,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } SubmitAsyncSearchRequest submit = new SubmitAsyncSearchRequest(); IntConsumer setSize = size -> submit.getSearchRequest().source().size(size); // for simplicity, we share parsing with ordinary search. 
diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java
index bd09d8f7740a1..952febd46c34c 100644
--- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java
+++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java
@@ -55,6 +55,9 @@ public String getName() {
 
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        if (client.threadPool() != null && client.threadPool().getThreadContext() != null) {
+            client.threadPool().getThreadContext().setErrorTraceTransportHeader(request);
+        }
         SubmitAsyncSearchRequest submit = new SubmitAsyncSearchRequest();
         IntConsumer setSize = size -> submit.getSearchRequest().source().size(size);
         // for simplicity, we share parsing with ordinary search. That means a couple of unsupported parameters, like scroll
diff --git a/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java b/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java
index e5dfea7b772f2..4c61904475093 100644
--- a/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java
+++ b/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java
@@ -26,6 +26,9 @@
 import org.junit.ClassRule;
 
 import java.io.IOException;
+import java.time.Instant;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -33,11 +36,14 @@ public class ShardChangesRestIT extends ESRestTestCase {
     private static final String CCR_SHARD_CHANGES_ENDPOINT = "/%s/ccr/shard_changes";
     private static final String BULK_INDEX_ENDPOINT = "/%s/_bulk";
+    private static final String DATA_STREAM_ENDPOINT = "/_data_stream/%s";
+    private static final String INDEX_TEMPLATE_ENDPOINT = "/_index_template/%s";
 
     private static final String[] SHARD_RESPONSE_FIELDS = new String[] {
         "took_in_millis",
         "operations",
         "shard_id",
+        "index_abstraction",
         "index",
         "settings_version",
         "max_seq_no_of_updates_or_deletes",
@@ -46,6 +52,11 @@ public class ShardChangesRestIT extends ESRestTestCase {
         "aliases_version",
         "max_seq_no",
         "global_checkpoint" };
+
+    private static final String BULK_INDEX_TEMPLATE = """
+        { "index": { "op_type": "create" } }
+        { "@timestamp": "%s", "name": "%s" }
+        """;
+
     private static final String[] NAMES = { "skywalker", "leia", "obi-wan", "yoda", "chewbacca", "r2-d2", "c-3po", "darth-vader" };
 
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
@@ -99,13 +110,86 @@ public void testShardChangesDefaultParams() throws IOException {
         createIndex(indexName, settings, mappings);
         assertTrue(indexExists(indexName));
 
-        assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(10, 20))));
+        assertOK(bulkIndex(indexName, randomIntBetween(10, 20)));
 
         final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(indexName));
         final Response response = client().performRequest(shardChangesRequest);
         assertOK(response);
         assertShardChangesResponse(
-            XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false)
+            XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false),
+            indexName
         );
     }
 
+    public void testDataStreamShardChangesDefaultParams() throws IOException {
+        final String templateName = randomAlphanumericOfLength(8).toLowerCase(Locale.ROOT);
+        assertOK(createIndexTemplate(templateName, """
+            {
+              "index_patterns": [ "test-*-*" ],
+              "data_stream": {},
+              "priority": 100,
+              "template": {
+                "mappings": {
+                  "properties": {
+                    "@timestamp": {
+                      "type": "date"
+                    },
+                    "name": {
+                      "type": "keyword"
+                    }
+                  }
+                }
+              }
+            }"""));
+
+        final String dataStreamName = "test-"
+            + randomAlphanumericOfLength(5).toLowerCase(Locale.ROOT)
+            + "-"
+            + randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
+        assertOK(createDataStream(dataStreamName));
+
+        assertOK(bulkIndex(dataStreamName, randomIntBetween(10, 20)));
+
+        final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(dataStreamName));
+        final Response response = client().performRequest(shardChangesRequest);
+        assertOK(response);
+        assertShardChangesResponse(
+            
XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + dataStreamName + ); + } + + public void testIndexAliasShardChangesDefaultParams() throws IOException { + final String indexName = randomAlphanumericOfLength(10).toLowerCase(Locale.ROOT); + final String aliasName = randomAlphanumericOfLength(8).toLowerCase(Locale.ROOT); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + final String mappings = """ + { + "properties": { + "name": { + "type": "keyword" + } + } + } + """; + createIndex(indexName, settings, mappings); + assertTrue(indexExists(indexName)); + + final Request putAliasRequest = new Request("PUT", "/" + indexName + "/_alias/" + aliasName); + assertOK(client().performRequest(putAliasRequest)); + + assertOK(bulkIndex(aliasName, randomIntBetween(10, 20))); + + final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(aliasName)); + final Response response = client().performRequest(shardChangesRequest); + assertOK(response); + assertShardChangesResponse( + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + aliasName ); } @@ -121,7 +205,7 @@ public void testShardChangesWithAllParameters() throws IOException { ); assertTrue(indexExists(indexName)); - assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(100, 200)))); + assertOK(bulkIndex(indexName, randomIntBetween(100, 200))); final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(indexName)); shardChangesRequest.addParameter("from_seq_no", "0"); @@ -132,7 +216,8 @@ public void testShardChangesWithAllParameters() throws IOException { final Response response = client().performRequest(shardChangesRequest); assertOK(response); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + indexName ); } @@ -148,7 +233,7 @@ public void testShardChangesMultipleRequests() throws IOException { ); assertTrue(indexExists(indexName)); - assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(100, 200)))); + assertOK(bulkIndex(indexName, randomIntBetween(100, 200))); final Request firstRequest = new Request("GET", shardChangesEndpoint(indexName)); firstRequest.addParameter("from_seq_no", "0"); @@ -159,7 +244,8 @@ public void testShardChangesMultipleRequests() throws IOException { final Response firstResponse = client().performRequest(firstRequest); assertOK(firstResponse); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(firstResponse.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(firstResponse.getEntity()), false), + indexName ); final Request secondRequest = new Request("GET", shardChangesEndpoint(indexName)); @@ -171,7 +257,8 @@ public void testShardChangesMultipleRequests() throws IOException { final Response secondResponse = client().performRequest(secondRequest); assertOK(secondResponse); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(secondResponse.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, 
EntityUtils.toString(secondResponse.getEntity()), false),
+            indexName
         );
     }
 
@@ -231,17 +318,36 @@ public void testShardChangesMissingIndex() throws IOException {
         assertResponseException(ex, RestStatus.BAD_REQUEST, "Failed to process shard changes for index [" + indexName + "]");
     }
 
-    private static Request bulkRequest(final String indexName, int numberOfDocuments) {
+    private static Response bulkIndex(final String indexName, int numberOfDocuments) throws IOException {
         final StringBuilder sb = new StringBuilder();
+        long timestamp = System.currentTimeMillis();
         for (int i = 0; i < numberOfDocuments; i++) {
-            sb.append(String.format(Locale.ROOT, "{ \"index\": { \"_id\": \"%d\" } }\n{ \"name\": \"%s\" }\n", i + 1, randomFrom(NAMES)));
+            sb.append(
+                String.format(
+                    Locale.ROOT,
+                    BULK_INDEX_TEMPLATE,
+                    Instant.ofEpochMilli(timestamp).atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME),
+                    randomFrom(NAMES)
+                )
+            );
+            timestamp += 1000; // 1 second
         }
 
         final Request request = new Request("POST", bulkEndpoint(indexName));
         request.setJsonEntity(sb.toString());
         request.addParameter("refresh", "true");
-        return request;
+        return client().performRequest(request);
+    }
+
+    private Response createDataStream(final String dataStreamName) throws IOException {
+        return client().performRequest(new Request("PUT", dataStreamEndpoint(dataStreamName)));
+    }
+
+    private static Response createIndexTemplate(final String templateName, final String mappings) throws IOException {
+        final Request request = new Request("PUT", indexTemplateEndpoint(templateName));
+        request.setJsonEntity(mappings);
+        return client().performRequest(request);
     }
 
     private static String shardChangesEndpoint(final String indexName) {
@@ -252,16 +358,28 @@ private static String bulkEndpoint(final String indexName) {
         return String.format(Locale.ROOT, BULK_INDEX_ENDPOINT, indexName);
     }
 
+    private static String dataStreamEndpoint(final String dataStreamName) {
+        return String.format(Locale.ROOT, DATA_STREAM_ENDPOINT, dataStreamName);
+    }
+
+    private static String indexTemplateEndpoint(final String templateName) {
+        return String.format(Locale.ROOT, INDEX_TEMPLATE_ENDPOINT, templateName);
+    }
+
     private void assertResponseException(final ResponseException ex, final RestStatus restStatus, final String error) {
         assertEquals(restStatus.getStatus(), ex.getResponse().getStatusLine().getStatusCode());
         assertThat(ex.getMessage(), Matchers.containsString(error));
     }
 
-    private void assertShardChangesResponse(final Map<String, Object> shardChangesResponseBody) {
+    private void assertShardChangesResponse(final Map<String, Object> shardChangesResponseBody, final String indexAbstractionName) {
         for (final String fieldName : SHARD_RESPONSE_FIELDS) {
            final Object fieldValue = shardChangesResponseBody.get(fieldName);
            assertNotNull("Field " + fieldName + " is missing or has a null value.", fieldValue);
 
+            if ("index_abstraction".equals(fieldName)) {
+                assertEquals(indexAbstractionName, fieldValue);
+            }
+
             if ("operations".equals(fieldName)) {
                 if (fieldValue instanceof List<?> operationsList) {
                     assertFalse("Field 'operations' is empty.", operationsList.isEmpty());
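Taken together with the tests above, using the endpoint against a data stream or alias now looks like the following sketch (illustrative only: the data stream name is invented, and the parameters are the ones exercised in testShardChangesWithAllParameters):

    // A data stream or alias name resolves to its first backing (or pointed-to) index.
    // The response reports both "index_abstraction" (the name that was requested) and
    // "index" (the concrete index the changes were actually read from).
    Request request = new Request("GET", "/logs-test-default/ccr/shard_changes");
    request.addParameter("from_seq_no", "0");
    request.addParameter("max_operations_count", "10");
    Response response = client().performRequest(request);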
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,6 +34,7 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; +import java.util.Locale; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -42,10 +45,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; /** - * A REST handler that retrieves shard changes in a specific index whose name is provided as a parameter. - * It handles GET requests to the "/{index}/ccr/shard_changes" endpoint retrieving shard-level changes, - * such as translog operations, mapping version, settings version, aliases version, the global checkpoint, - * maximum sequence number and maximum sequence number of updates or deletes. + * A REST handler that retrieves shard changes in a specific index, data stream or alias whose name is + * provided as a parameter. It handles GET requests to the "/{index}/ccr/shard_changes" endpoint retrieving + * shard-level changes, such as Translog operations, mapping version, settings version, aliases version, + * the global checkpoint, maximum sequence number and maximum sequence number of updates or deletes. + *
<p>
+ * In the case of a data stream, the first backing index is considered the target for retrieving shard changes. + * In the case of an alias, the first index that the alias points to is considered the target for retrieving + * shard changes. *
<p>
* Note: This handler is only available for snapshot builds. */ @@ -84,32 +91,36 @@ public List routes() { */ @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - final var indexName = restRequest.param(INDEX_PARAM_NAME); + final var indexAbstractionName = restRequest.param(INDEX_PARAM_NAME); final var fromSeqNo = restRequest.paramAsLong(FROM_SEQ_NO_PARAM_NAME, DEFAULT_FROM_SEQ_NO); final var maxBatchSize = restRequest.paramAsSize(MAX_BATCH_SIZE_PARAM_NAME, DEFAULT_MAX_BATCH_SIZE); final var pollTimeout = restRequest.paramAsTime(POLL_TIMEOUT_PARAM_NAME, DEFAULT_POLL_TIMEOUT); final var maxOperationsCount = restRequest.paramAsInt(MAX_OPERATIONS_COUNT_PARAM_NAME, DEFAULT_MAX_OPERATIONS_COUNT); - final CompletableFuture indexUUIDCompletableFuture = asyncGetIndexUUID( + // NOTE: we first retrieve the concrete index name in case we are dealing with an alias or data stream. + // Then we use the concrete index name to retrieve the index UUID and shard stats. + final CompletableFuture indexNameCompletableFuture = asyncGetIndexName( client, - indexName, + indexAbstractionName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME) ); - final CompletableFuture shardStatsCompletableFuture = asyncShardStats( - client, - indexName, - client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME) + final CompletableFuture indexUUIDCompletableFuture = indexNameCompletableFuture.thenCompose( + concreteIndexName -> asyncGetIndexUUID(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) + ); + final CompletableFuture shardStatsCompletableFuture = indexNameCompletableFuture.thenCompose( + concreteIndexName -> asyncShardStats(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) ); return channel -> CompletableFuture.allOf(indexUUIDCompletableFuture, shardStatsCompletableFuture).thenRun(() -> { try { + final String concreteIndexName = indexNameCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final String indexUUID = indexUUIDCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final ShardStats shardStats = shardStatsCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final ShardId shardId = shardStats.getShardRouting().shardId(); final String expectedHistoryUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); final ShardChangesAction.Request shardChangesRequest = shardChangesRequest( - indexName, + concreteIndexName, indexUUID, shardId, expectedHistoryUUID, @@ -121,7 +132,12 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina client.execute(ShardChangesAction.INSTANCE, shardChangesRequest, new RestActionListener<>(channel) { @Override protected void processResponse(final ShardChangesAction.Response response) { - channel.sendResponse(new RestResponse(RestStatus.OK, shardChangesResponseToXContent(response, indexName, shardId))); + channel.sendResponse( + new RestResponse( + RestStatus.OK, + shardChangesResponseToXContent(response, indexAbstractionName, concreteIndexName, shardId) + ) + ); } }); @@ -132,7 +148,12 @@ protected void processResponse(final ShardChangesAction.Response response) { throw new IllegalStateException("Timeout while waiting for shard stats or index UUID", te); } }).exceptionally(ex -> { - channel.sendResponse(new RestResponse(RestStatus.BAD_REQUEST, "Failed to process shard changes for index [" + indexName + "]")); + channel.sendResponse( + new RestResponse( + 
RestStatus.BAD_REQUEST, + "Failed to process shard changes for index [" + indexAbstractionName + "] " + ex.getMessage() + ) + ); return null; }); } @@ -175,17 +196,20 @@ private static ShardChangesAction.Request shardChangesRequest( * Converts the response to XContent JSON format. * * @param response The ShardChangesAction response. - * @param indexName The name of the index. + * @param indexAbstractionName The name of the index abstraction. + * @param concreteIndexName The name of the index. * @param shardId The ShardId. */ private static XContentBuilder shardChangesResponseToXContent( final ShardChangesAction.Response response, - final String indexName, + final String indexAbstractionName, + final String concreteIndexName, final ShardId shardId ) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); - builder.field("index", indexName); + builder.field("index_abstraction", indexAbstractionName); + builder.field("index", concreteIndexName); builder.field("shard_id", shardId); builder.field("mapping_version", response.getMappingVersion()); builder.field("settings_version", response.getSettingsVersion()); @@ -249,26 +273,60 @@ private static <T> CompletableFuture<T> supplyAsyncTask( }, executorService); } + /** + * Asynchronously retrieves the index name for a given index, alias or data stream. + * If the name represents a data stream, the name of the first backing index is returned. + * If the name represents an alias, the name of the first index that the alias points to is returned. + * + * @param client The NodeClient for executing the asynchronous request. + * @param indexAbstractionName The name of the index, alias or data stream. + * @param executorService The executor service for executing the asynchronous task. + * @return A CompletableFuture that completes with the retrieved index name. + */ + private static CompletableFuture<String> asyncGetIndexName( + final NodeClient client, + final String indexAbstractionName, + final ExecutorService executorService + ) { + return supplyAsyncTask(() -> { + final ClusterState clusterState = client.admin() + .cluster() + .prepareState(new TimeValue(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS)) + .get(GET_INDEX_UUID_TIMEOUT) + .getState(); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(indexAbstractionName); + if (indexAbstraction == null) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Invalid index or data stream name [%s]", indexAbstractionName) + ); + } + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM + || indexAbstraction.getType() == IndexAbstraction.Type.ALIAS) { + return indexAbstraction.getIndices().getFirst().getName(); + } + return indexAbstractionName; + }, executorService, "Error while retrieving index name for index or data stream [" + indexAbstractionName + "]"); + } + /** * Asynchronously retrieves the shard stats for a given index using an executor service. * * @param client The NodeClient for executing the asynchronous request. - * @param indexName The name of the index for which to retrieve shard statistics. + * @param concreteIndexName The name of the index for which to retrieve shard statistics. * @param executorService The executorService service for executing the asynchronous task. * @return A CompletableFuture that completes with the retrieved ShardStats. * @throws ElasticsearchException If an error occurs while retrieving shard statistics. 
*/ private static CompletableFuture<ShardStats> asyncShardStats( final NodeClient client, - final String indexName, + final String concreteIndexName, final ExecutorService executorService ) { return supplyAsyncTask( - () -> Arrays.stream(client.admin().indices().prepareStats(indexName).clear().get(SHARD_STATS_TIMEOUT).getShards()) + () -> Arrays.stream(client.admin().indices().prepareStats(concreteIndexName).clear().get(SHARD_STATS_TIMEOUT).getShards()) .max(Comparator.comparingLong(shardStats -> shardStats.getCommitStats().getGeneration())) - .orElseThrow(() -> new ElasticsearchException("Unable to retrieve shard stats for index: " + indexName)), + .orElseThrow(() -> new ElasticsearchException("Unable to retrieve shard stats for index: " + concreteIndexName)), executorService, - "Error while retrieving shard stats for index [" + indexName + "]" + "Error while retrieving shard stats for index [" + concreteIndexName + "]" ); } @@ -276,25 +334,25 @@ private static CompletableFuture<ShardStats> asyncShardStats( * Asynchronously retrieves the index UUID for a given index using an executor service. * * @param client The NodeClient for executing the asynchronous request. - * @param indexName The name of the index for which to retrieve the index UUID. + * @param concreteIndexName The name of the index for which to retrieve the index UUID. * @param executorService The executorService service for executing the asynchronous task. * @return A CompletableFuture that completes with the retrieved index UUID. * @throws ElasticsearchException If an error occurs while retrieving the index UUID. */ private static CompletableFuture<String> asyncGetIndexUUID( final NodeClient client, - final String indexName, + final String concreteIndexName, final ExecutorService executorService ) { return supplyAsyncTask( () -> client.admin() .indices() .prepareGetIndex() - .setIndices(indexName) + .setIndices(concreteIndexName) .get(GET_INDEX_UUID_TIMEOUT) - .getSetting(indexName, IndexMetadata.SETTING_INDEX_UUID), + .getSetting(concreteIndexName, IndexMetadata.SETTING_INDEX_UUID), executorService, - "Error while retrieving index UUID for index [" + indexName + "]" + "Error while retrieving index UUID for index [" + concreteIndexName + "]" ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index f44409daa37f8..3ebfad04a0f13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -55,8 +55,10 @@ public SecurityFeatureSetUsage(StreamInput in) throws IOException { realmsUsage = in.readGenericMap(); rolesStoreUsage = in.readGenericMap(); sslUsage = in.readGenericMap(); - tokenServiceUsage = in.readGenericMap(); - apiKeyServiceUsage = in.readGenericMap(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { + tokenServiceUsage = in.readGenericMap(); + apiKeyServiceUsage = in.readGenericMap(); + } auditUsage = in.readGenericMap(); ipFilterUsage = in.readGenericMap(); anonymousUsage = in.readGenericMap(); @@ -121,8 +123,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(realmsUsage); out.writeGenericMap(rolesStoreUsage); out.writeGenericMap(sslUsage); - out.writeGenericMap(tokenServiceUsage); - out.writeGenericMap(apiKeyServiceUsage); + if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { + out.writeGenericMap(tokenServiceUsage); + out.writeGenericMap(apiKeyServiceUsage); + } out.writeGenericMap(auditUsage); out.writeGenericMap(ipFilterUsage); out.writeGenericMap(anonymousUsage); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java index 59c16fc8a7a72..8fe018a825468 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java @@ -59,6 +59,9 @@ public TokensInvalidationResult(StreamInput in) throws IOException { this.invalidatedTokens = in.readStringCollectionAsList(); this.previouslyInvalidatedTokens = in.readStringCollectionAsList(); this.errors = in.readCollectionAsList(StreamInput::readException); + if (in.getTransportVersion().before(TransportVersions.V_7_2_0)) { + in.readVInt(); + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.restStatus = RestStatus.readFrom(in); } @@ -108,6 +111,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(invalidatedTokens); out.writeStringCollection(previouslyInvalidatedTokens); out.writeCollection(errors, StreamOutput::writeException); + if (out.getTransportVersion().before(TransportVersions.V_7_2_0)) { + out.writeVInt(5); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { RestStatus.writeTo(out, restStatus); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java index 24f0a52436203..92bb037888495 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java @@ -159,13 +159,15 @@ private static void buildRoleQuery( if (queryBuilder != null) { failIfQueryUsesClient(queryBuilder, context); Query roleQuery = context.toQuery(queryBuilder).query(); - filter.add(roleQuery, SHOULD); - if (context.nestedLookup() != NestedLookup.EMPTY) { + if (context.nestedLookup() == NestedLookup.EMPTY) { + filter.add(roleQuery, SHOULD); + } else { if (NestedHelper.mightMatchNestedDocs(roleQuery, context)) { roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER) .add(Queries.newNonNestedFilter(context.indexVersionCreated()), FILTER) .build(); } + filter.add(roleQuery, SHOULD); // If access is allowed on root doc then also access is allowed on all nested docs of that root document: BitSetProducer rootDocs = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); ToChildBlockJoinQuery includeNestedDocs = new ToChildBlockJoinQuery(roleQuery, rootDocs); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index 4751f66cf548e..89b42228d8918 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -23,9 +23,12 @@ import org.apache.lucene.search.TotalHitCountCollectorManager; import org.apache.lucene.store.Directory; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexSettings; @@ -33,9 +36,11 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MockFieldMapper; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -45,6 +50,9 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.test.AbstractBuilderTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; @@ -52,6 +60,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import java.io.IOException; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -340,6 +350,176 @@ protected IndicesAccessControl getIndicesAccessControl() { directory.close(); } + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("f1") + .field("type", "keyword") + .endObject() + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); + } + + public void testDLSWithNestedDocs() throws Exception { + Directory directory = newDirectory(); + try ( + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + var parser = mapperService().documentParser(); + String doc = """ + { + "f1": "value", + "nested1": [ + 
{ + "field": "0" + }, + { + "field": "1" + }, + {} + ] + } + """; + var parsedDoc = parser.parseDocument( + new SourceToParse("0", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "nested1": [ + { + "field": "12" + }, + { + "field": "13" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("1", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "f1": "value", + "nested1": [ + { + "field": "12" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("2", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "nested1": [ + { + "field": "12" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("3", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + iw.commit(); + } + + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap( + DirectoryReader.open(directory), + new ShardId(indexSettings().getIndex(), 0) + ); + SearchExecutionContext context = createSearchExecutionContext(new IndexSearcher(directoryReader)); + + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final Authentication authentication = AuthenticationTestHelper.builder().build(); + new AuthenticationContextSerializer().writeToContext(authentication, threadContext); + + Set queries = new HashSet<>(); + queries.add(new BytesArray("{\"bool\": { \"must_not\": { \"exists\": { \"field\": \"f1\" } } } }")); + IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl( + FieldPermissions.DEFAULT, + DocumentPermissions.filteredBy(queries) + ); + + DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor()); + + final MockLicenseState licenseState = mock(MockLicenseState.class); + when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true); + ScriptService scriptService = mock(ScriptService.class); + SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper( + s -> context, + bitsetCache, + securityContext, + licenseState, + scriptService + ) { + + @Override + protected IndicesAccessControl getIndicesAccessControl() { + IndicesAccessControl indicesAccessControl = new IndicesAccessControl( + true, + singletonMap(indexSettings().getIndex().getName(), indexAccessControl) + ); + return indicesAccessControl; + } + }; + + DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader); + IndexSearcher indexSearcher = new ContextIndexSearcher( + wrappedDirectoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true + ); + + ScoreDoc[] hits = indexSearcher.search(new MatchAllDocsQuery(), 1000).scoreDocs; + assertThat(Arrays.stream(hits).map(h -> h.doc).collect(Collectors.toSet()), containsInAnyOrder(4, 5, 6, 7, 11, 12, 13)); + + hits = indexSearcher.search(Queries.newNonNestedFilter(context.indexVersionCreated()), 1000).scoreDocs; + assertThat(Arrays.stream(hits).map(h -> h.doc).collect(Collectors.toSet()), containsInAnyOrder(7, 13)); + + bitsetCache.close(); + directoryReader.close(); + 
directory.close(); + } + private static MappingLookup createMappingLookup(List concreteFields) { List mappers = concreteFields.stream().map(MockFieldMapper::new).collect(Collectors.toList()); return MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList()); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json index 651e1c84da73a..5afa557e1405e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json @@ -7,7 +7,7 @@ "dynamic": "false", "_meta": { "pipeline": { - "default_name": "ent-search-generic-ingestion", + "default_name": "search-default-ingestion", "default_extract_binary_content": true, "default_run_ml_inference": true, "default_reduce_whitespace": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json deleted file mode 100644 index e2a2cbd460117..0000000000000 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "version": ${xpack.application.connector.template.version}, - "description": "Generic Enterprise Search ingest pipeline", - "_meta": { - "managed_by": "Enterprise Search", - "managed": true - }, - "processors": [ - { - "attachment": { - "description": "Extract text from binary attachments", - "field": "_attachment", - "target_field": "_extracted_attachment", - "ignore_missing": true, - "indexed_chars_field": "_attachment_indexed_chars", - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'attachment' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ], - "remove_binary": false - } - }, - { - "set": { - "tag": "set_body", - "description": "Set any extracted text on the 'body' field", - "field": "body", - "copy_from": "_extracted_attachment.content", - "ignore_empty_value": true, - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'set' with tag 'set_body' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "gsub": { - "tag": "remove_replacement_chars", - "description": "Remove unicode 'replacement' characters", - "field": "body", - "pattern": "�", - "replacement": "", - "ignore_missing": true, - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'gsub' with tag 'remove_replacement_chars' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "gsub": { - "tag": "remove_extra_whitespace", - "description": "Squish whitespace", - "field": "body", - "pattern": "\\s+", - "replacement": " ", - "ignore_missing": true, - "if": "ctx?._reduce_whitespace == true", - "on_failure": 
[ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'gsub' with tag 'remove_extra_whitespace' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "trim" : { - "description": "Trim leading and trailing whitespace", - "field": "body", - "ignore_missing": true, - "if": "ctx?._reduce_whitespace == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'trim' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "remove": { - "tag": "remove_meta_fields", - "description": "Remove meta fields", - "field": [ - "_attachment", - "_attachment_indexed_chars", - "_extracted_attachment", - "_extract_binary_content", - "_reduce_whitespace", - "_run_ml_inference" - ], - "ignore_missing": true, - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'remove' with tag 'remove_meta_fields' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - } - ] -} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index e630f929bc09b..fd35acc89db5c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -48,10 +48,6 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { public static final String MANAGED_CONNECTOR_INDEX_PREFIX = "content-"; // Pipeline constants - - public static final String ENT_SEARCH_GENERIC_PIPELINE_NAME = "ent-search-generic-ingestion"; - public static final String ENT_SEARCH_GENERIC_PIPELINE_FILE = "generic_ingestion_pipeline"; - public static final String SEARCH_DEFAULT_PIPELINE_NAME = "search-default-ingestion"; public static final String SEARCH_DEFAULT_PIPELINE_FILE = "search_default_pipeline"; @@ -111,12 +107,6 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { @Override protected List getIngestPipelines() { return List.of( - new JsonIngestPipelineConfig( - ENT_SEARCH_GENERIC_PIPELINE_NAME, - ROOT_RESOURCE_PATH + ENT_SEARCH_GENERIC_PIPELINE_FILE + ".json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE - ), new JsonIngestPipelineConfig( SEARCH_DEFAULT_PIPELINE_NAME, ROOT_RESOURCE_PATH + SEARCH_DEFAULT_PIPELINE_FILE + ".json", diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index 5b27cc7a3e05a..3a53ed977318d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -50,7 +50,6 @@ public final class QueryRuleRetrieverBuilder extends CompoundRetrieverBuilder PARSER = new ConstructingObjectParser<>( diff --git 
a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java index f4a92e51e8c6a..c3d4bf8b72ff5 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java @@ -50,7 +50,7 @@ public void testToXContent() throws IOException { String content = XContentHelper.stripWhitespace(""" { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java index a4c7015afafcb..068b99626af9d 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java @@ -132,10 +132,7 @@ public void testThatNonExistingComponentTemplatesAreAddedImmediately() throws Ex ClusterChangedEvent event = createClusterChangedEvent( Collections.emptyMap(), Collections.emptyMap(), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); @@ -169,10 +166,7 @@ public void testThatVersionedOldComponentTemplatesAreUpgraded() throws Exception ConnectorTemplateRegistry.CONNECTOR_TEMPLATE_NAME + "-settings", ConnectorTemplateRegistry.REGISTRY_VERSION - 1 ), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); @@ -189,10 +183,7 @@ public void testThatUnversionedOldComponentTemplatesAreUpgraded() throws Excepti ClusterChangedEvent event = createClusterChangedEvent( Collections.emptyMap(), Collections.singletonMap(ConnectorTemplateRegistry.CONNECTOR_TEMPLATE_NAME + "-mappings", null), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 734c6eaf86965..bcb647d978abb 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -225,7 +225,7 @@ public void testToXContent() throws IOException { 
"name":"test-name", "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, @@ -286,7 +286,7 @@ public void testToContent_WithNullValues() throws IOException { "name": null, "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, @@ -350,7 +350,7 @@ public void testToXContent_withOptionalFieldsMissing() throws IOException { "name": null, "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java index 81b05ce25e177..ed3338c715bdf 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java @@ -77,7 +77,7 @@ public void testFromXContent_WithAllFields_AllSet() throws IOException { "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -160,7 +160,7 @@ public void testFromXContent_WithOnlyNonNullableFieldsSet_DoesNotThrow() throws "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -218,7 +218,7 @@ public void testFromXContent_WithAllNullableFieldsSetToNull_DoesNotThrow() throw "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -275,7 +275,7 @@ public void testSyncJobConnectorFromXContent_WithAllFieldsSet() throws IOExcepti "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java index a092e17931237..8a075e8887512 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java @@ -174,8 +174,12 @@ public Stream parallelStream() { } @Override - public boolean equals(Object o) { - return delegate.equals(o); + public boolean equals(Object obj) { + if (obj instanceof AttributeSet as) { + obj = as.delegate; + } + + return delegate.equals(obj); } @Override diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java index 511c7f4b1d2f8..ade79c8168076 100644 --- 
a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeMapTests.java @@ -30,7 +30,7 @@ public class AttributeMapTests extends ESTestCase { - private static Attribute a(String name) { + static Attribute a(String name) { return new UnresolvedAttribute(Source.EMPTY, name); } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java new file mode 100644 index 0000000000000..0e97773fb90d2 --- /dev/null +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/AttributeSetTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.core.expression; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.AttributeMapTests.a; + +public class AttributeSetTests extends ESTestCase { + + public void testEquals() { + Attribute a1 = a("1"); + Attribute a2 = a("2"); + + AttributeSet first = new AttributeSet(List.of(a1, a2)); + assertEquals(first, first); + + AttributeSet second = new AttributeSet(); + second.add(a1); + second.add(a2); + + assertEquals(first, second); + assertEquals(second, first); + + AttributeSet third = new AttributeSet(); + third.add(a("1")); + third.add(a("2")); + + assertNotEquals(first, third); + assertNotEquals(third, first); + + assertEquals(AttributeSet.EMPTY, AttributeSet.EMPTY); + assertEquals(AttributeSet.EMPTY, first.intersect(third)); + assertEquals(third.intersect(first), AttributeSet.EMPTY); + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 7adafa908ce4f..f0bdf089f69d1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -63,7 +63,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; public final class CsvTestUtils { - private static final int MAX_WIDTH = 20; + private static final int MAX_WIDTH = 80; private static final CsvPreference CSV_SPEC_PREFERENCES = new CsvPreference.Builder('"', '|', "\r\n").build(); private static final String NULL_VALUE = "null"; private static final char ESCAPE_CHAR = '\\'; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index e786c87a9eee5..7aca63182e2b1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -365,7 +365,7 @@ private static void load(RestClient client, TestsDataset dataset, Logger logger, if (mapping == null) { throw new IllegalArgumentException("Cannot 
find resource " + mappingName); } - final String dataName = "/" + dataset.dataFileName; + final String dataName = "/data/" + dataset.dataFileName; URL data = CsvTestsDataLoader.class.getResource(dataName); if (data == null) { throw new IllegalArgumentException("Cannot find resource " + dataName); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index b29c489910f65..8cfde2bb9bde7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -145,6 +145,24 @@ AVG(salary):double | bucket:date // end::bucket_in_agg-result[] ; +bucketWithOffset#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +// tag::bucketWithOffset[] +FROM employees +| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR +| EVAL d_count = MV_COUNT(dates) +| SORT d_count, b +| LIMIT 3 +// end::bucketWithOffset[] +; + +// tag::bucketWithOffset-result[] +dates:date |b:date |d_count:integer +1965-01-03T00:00:00.000Z |1964-12-31T23:00:00.000Z|1 +[1955-01-21T00:00:00.000Z, 1955-08-20T00:00:00.000Z, 1955-08-28T00:00:00.000Z, 1955-10-04T00:00:00.000Z]|1954-12-31T23:00:00.000Z|4 +[1957-04-04T00:00:00.000Z, 1957-05-23T00:00:00.000Z, 1957-05-25T00:00:00.000Z, 1957-12-03T00:00:00.000Z]|1956-12-31T23:00:00.000Z|4 +// end::bucketWithOffset-result[] +; + docsBucketMonth#[skip:-8.13.99, reason:BUCKET renamed in 8.14] //tag::docsBucketMonth[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/addresses.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/addresses.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ages.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/ages.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ages.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airport_city_boundaries.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airport_city_boundaries.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airport_city_boundaries.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_mp.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_mp.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_mp.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_mp.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_web.csv similarity index 100% rename from 
x-pack/plugin/esql/qa/testFixtures/src/main/resources/airports_web.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/airports_web.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/alerts.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/alerts.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/alerts.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/alerts.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/apps.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/apps.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/apps.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/apps.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/cartesian_multipolygons.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/cartesian_multipolygons.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/client_cidr.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/client_cidr.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/client_cidr.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/clientips.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/clientips.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox_web.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/countries_bbox_web.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/date_nanos.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/date_nanos.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/decades.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/decades.csv 
rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/decades.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/distances.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/distances.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees_incompatible.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees_incompatible.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees_incompatible.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/employees_incompatible.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/heights.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/heights.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/heights.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hosts.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/hosts.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/hosts.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/hosts.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/k8s.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/k8s.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_nested_fields.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_nested_fields.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_nested_fields.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_non_unique_key.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_non_unique_key.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_non_unique_key.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/languages_non_unique_key.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/message_types.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv rename to 
x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/message_types.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/missing_ip_sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/missing_ip_sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/missing_ip_sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/missing_ip_sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_geometries.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_geometries.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_geometries.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_geometries.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_points.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_points.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/multivalue_points.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/multivalue_points.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/mv_sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/mv_sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_long.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_long.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_long.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_nanos.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/sample_data_ts_nanos.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/semantic_text.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/semantic_text.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ul_logs.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ul_logs.csv similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/ul_logs.csv rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/ul_logs.csv diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java index 8de65847c3f85..8054b260f0060 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java @@ -10,26 +10,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.engine.SegmentsStats; -import org.elasticsearch.index.mapper.OnScriptError; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; import java.io.IOException; import java.util.Collection; -import java.util.Map; -import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -40,8 +29,6 @@ */ public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTestCase { - private static final Logger LOGGER = LogManager.getLogger(AbstractPausableIntegTestCase.class); - protected static final Semaphore scriptPermits = new Semaphore(0); protected int pageSize = -1; @@ -108,53 +95,10 @@ public void setupIndex() throws IOException { } } - public static class PausableFieldPlugin extends Plugin implements ScriptPlugin { - + public static class PausableFieldPlugin extends AbstractPauseFieldPlugin { @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - try { - assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES)); - } catch (Exception e) { - throw new AssertionError(e); - } - LOGGER.debug("--> emitting value"); - emit(1); - } - }; - } - }; - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; + protected boolean onWait() throws InterruptedException { + return scriptPermits.tryAcquire(1, TimeUnit.MINUTES); } } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java new file mode 100644 index 0000000000000..5554f7e571dfb --- /dev/null +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPauseFieldPlugin.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.OnScriptError; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.LongFieldScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.lookup.SearchLookup; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import static org.junit.Assert.assertTrue; + +/** + * A plugin that provides a script language "pause" that can be used to simulate slow running queries. + * See also {@link AbstractPausableIntegTestCase}. + */ +public abstract class AbstractPauseFieldPlugin extends Plugin implements ScriptPlugin { + + // Called when the engine enters the execute() method. + protected void onStartExecute() {} + + // Called when the engine needs to wait for further execution to be allowed. + protected abstract boolean onWait() throws InterruptedException; + + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { + return new ScriptEngine() { + @Override + public String getType() { + return "pause"; + } + + @Override + @SuppressWarnings("unchecked") + public FactoryType compile( + String name, + String code, + ScriptContext context, + Map params + ) { + if (context == LongFieldScript.CONTEXT) { + return (FactoryType) new LongFieldScript.Factory() { + @Override + public LongFieldScript.LeafFactory newFactory( + String fieldName, + Map params, + SearchLookup searchLookup, + OnScriptError onScriptError + ) { + return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { + @Override + public void execute() { + onStartExecute(); + try { + assertTrue(onWait()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + emit(1); + } + }; + } + }; + } + throw new IllegalStateException("unsupported type " + context); + } + + @Override + public Set> getSupportedContexts() { + return Set.of(LongFieldScript.CONTEXT); + } + }; + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java index a2bba19db50fc..3926ea4c27a3d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -19,14 +19,8 @@ import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; 
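The extracted AbstractPauseFieldPlugin above registers a script engine for the language "pause" that only supports LongFieldScript.CONTEXT, i.e. long runtime fields. For orientation, here is a minimal hedged sketch of how a test index can bind a runtime field to that language so that every read of the field funnels through onWait(); the index and field names are illustrative assumptions, not part of this change:

```java
// Hedged sketch: bind a "long" runtime field to the "pause" script language so that
// fetching "pause_me" blocks inside AbstractPauseFieldPlugin#onWait until the test
// releases it. Index and field names are hypothetical.
XContentBuilder mapping = JsonXContent.contentBuilder().startObject();
mapping.startObject("runtime");
{
    mapping.startObject("pause_me");
    {
        mapping.field("type", "long");
        mapping.startObject("script").field("source", "").field("lang", "pause").endObject();
    }
    mapping.endObject();
}
mapping.endObject();
mapping.endObject();
client().admin().indices().prepareCreate("test").setMapping(mapping).get();
```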
-import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.transport.RemoteClusterAware; @@ -44,7 +38,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -80,7 +73,7 @@ protected Collection> nodePlugins(String clusterAlias) { plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); // allows the async_search DELETE action plugins.add(InternalExchangePlugin.class); - plugins.add(PauseFieldPlugin.class); + plugins.add(SimplePauseFieldPlugin.class); return plugins; } @@ -99,64 +92,7 @@ public List> getSettings() { @Before public void resetPlugin() { - PauseFieldPlugin.allowEmitting = new CountDownLatch(1); - PauseFieldPlugin.startEmitting = new CountDownLatch(1); - } - - public static class PauseFieldPlugin extends Plugin implements ScriptPlugin { - public static CountDownLatch startEmitting = new CountDownLatch(1); - public static CountDownLatch allowEmitting = new CountDownLatch(1); - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - if (context == LongFieldScript.CONTEXT) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - startEmitting.countDown(); - try { - assertTrue(allowEmitting.await(30, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - emit(1); - } - }; - } - }; - } - throw new IllegalStateException("unsupported type " + context); - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } + SimplePauseFieldPlugin.resetPlugin(); } /** @@ -184,7 +120,7 @@ public void testSuccessfulPathways() throws Exception { } // wait until we know that the query against 'remote-b:blocking' has started - PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS); + SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS); // wait until the query of 'cluster-a:logs-*' has finished (it is not blocked since we are not searching the 'blocking' index on it) assertBusy(() -> { @@ -234,7 +170,7 @@ public void testSuccessfulPathways() throws Exception { } // allow remoteB query to proceed - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); // wait until both remoteB and local queries have finished assertBusy(() -> { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 17f5f81486651..cfe6fdeccb190 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -15,18 +15,11 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.transport.TransportService; @@ -38,9 +31,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; @@ -63,7 +53,7 @@ protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); - plugins.add(PauseFieldPlugin.class); + plugins.add(SimplePauseFieldPlugin.class); return plugins; } @@ -82,63 +72,7 @@ public List> getSettings() { @Before public void resetPlugin() { - PauseFieldPlugin.allowEmitting = new CountDownLatch(1); - PauseFieldPlugin.startEmitting = new CountDownLatch(1); - } - - public static class PauseFieldPlugin extends Plugin implements ScriptPlugin { - public static CountDownLatch startEmitting = new CountDownLatch(1); - public static CountDownLatch allowEmitting = new CountDownLatch(1); - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - if (context == LongFieldScript.CONTEXT) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - startEmitting.countDown(); - try { - assertTrue(allowEmitting.await(30, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - emit(1); - } - }; - } - }; - } - throw new IllegalStateException("unsupported type " + context); - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } + SimplePauseFieldPlugin.resetPlugin(); } private void createRemoteIndex(int numDocs) throws Exception { @@ -169,7 +103,7 @@ public void testCancel() throws Exception { request.pragmas(randomPragmas()); PlainActionFuture requestFuture = new PlainActionFuture<>(); 
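Distilled from the rewritten tests in this diff, the two-latch handshake provided by SimplePauseFieldPlugin (introduced below) follows one shape; this sketch elides the query construction and uses the same timeouts as the tests:

```java
// The two-latch handshake these tests rely on. resetPlugin() runs in a @Before method
// so each test gets fresh latches; startEmitting proves the script was entered;
// allowEmitting releases it so the query can finish.
SimplePauseFieldPlugin.resetPlugin();
ActionFuture<EsqlQueryResponse> future = client().execute(EsqlQueryAction.INSTANCE, request);
assertTrue(SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); // script reached execute()
// ... assert against the paused query here, e.g. list its tasks ...
SimplePauseFieldPlugin.allowEmitting.countDown(); // let emit(1) proceed
future.actionGet(30, TimeUnit.SECONDS).close();
```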
client().execute(EsqlQueryAction.INSTANCE, request, requestFuture); - assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); + assertTrue(SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); List rootTasks = new ArrayList<>(); assertBusy(() -> { List tasks = client().admin().cluster().prepareListTasks().setActions(EsqlQueryAction.NAME).get().getTasks(); @@ -192,7 +126,7 @@ public void testCancel() throws Exception { } }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } Exception error = expectThrows(Exception.class, requestFuture::actionGet); assertThat(error.getMessage(), containsString("proxy timeout")); @@ -223,7 +157,7 @@ public void testSameRemoteClusters() throws Exception { assertThat(tasks, hasSize(moreClusters + 1)); }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } try (EsqlQueryResponse resp = future.actionGet(30, TimeUnit.SECONDS)) { // TODO: This produces incorrect results because data on the remote cluster is processed multiple times. @@ -244,7 +178,7 @@ public void testTasks() throws Exception { request.query("FROM *:test | STATS total=sum(const) | LIMIT 1"); request.pragmas(randomPragmas()); ActionFuture requestFuture = client().execute(EsqlQueryAction.INSTANCE, request); - assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); + assertTrue(SimplePauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); try { assertBusy(() -> { List clusterTasks = client(REMOTE_CLUSTER).admin() @@ -270,7 +204,7 @@ public void testTasks() throws Exception { \\_ExchangeSinkOperator""")); }); } finally { - PauseFieldPlugin.allowEmitting.countDown(); + SimplePauseFieldPlugin.allowEmitting.countDown(); } requestFuture.actionGet(30, TimeUnit.SECONDS).close(); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java new file mode 100644 index 0000000000000..3ba73dd9a402e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SimplePauseFieldPlugin.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * A plugin that provides a script language "pause" that can be used to simulate slow running queries. + * This implementation makes it possible to know when execution has reached execute(), via startEmitting, and to allow execution to proceed, + * via allowEmitting.
+ */ +public class SimplePauseFieldPlugin extends AbstractPauseFieldPlugin { + public static CountDownLatch startEmitting = new CountDownLatch(1); + public static CountDownLatch allowEmitting = new CountDownLatch(1); + + public static void resetPlugin() { + allowEmitting = new CountDownLatch(1); + startEmitting = new CountDownLatch(1); + } + + @Override + public void onStartExecute() { + startEmitting.countDown(); + } + + @Override + public boolean onWait() throws InterruptedException { + return allowEmitting.await(30, TimeUnit.SECONDS); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index ecd0821c626bf..3d1bfdfd0ef42 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -691,15 +691,9 @@ private List resolveUsingColumns(List cols, List> parameter). To do so, you will need to + take into account how the language handles expressions within the `STATS` command: if these contain functions or + arithmetic operators, a virtual `EVAL` is inserted before and/or after the `STATS` command. Consequently, a double + compensation is needed to adjust the bucketed date value before the aggregation and then again after. For instance, + applying a negative offset of `1 hour` to buckets of `1 year` looks like this:""", + file = "bucket", + tag = "bucketWithOffset" ) } ) public Bucket( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index b81b80a9fdbb4..1e0374c648579 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -322,13 +322,14 @@ private void doTest() throws Exception { } protected void assertResults(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { /* - * Comment the assertion above and enable the next two lines to see the results returned by ES without any assertions being done. + * Enable the next two lines to see the results returned by ES. * This is useful when creating a new test or trying to figure out what are the actual results.
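The csv-spec snippet referenced by the `Bucket` annotation above (file "bucket", tag "bucketWithOffset") is not reproduced in this diff. Purely as a hedged illustration of the double compensation it describes, the offset can be written inline in the grouping expression, which the analyzer then unrolls into virtual `EVAL`s around the `STATS`; the index and field names here are assumptions, not the authoritative example:

```java
// Hypothetical ES|QL sketch of the double compensation: shift the date onto the
// offset bucket grid before bucketing, then shift the resulting bucket key back.
// The authoritative snippet lives in the "bucket" csv-spec under "bucketWithOffset".
String bucketWithNegativeOffset = """
    FROM employees
    | STATS hires = COUNT(*) BY b = BUCKET(hire_date + 1 hour, 1 year) - 1 hour
    | SORT b
    """;
```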
*/ // CsvTestUtils.logMetaData(actual.columnNames(), actual.columnTypes(), LOGGER); // CsvTestUtils.logData(actual.values(), LOGGER); + + CsvAssert.assertResults(expected, actual, ignoreOrder, logger); } private static IndexResolution loadIndexResolution(String mappingName, String indexName, Map typeMapping) { @@ -425,7 +426,7 @@ private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) } private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.TestsDataset dataset) throws Exception { - var testData = loadPageFromCsv(CsvTests.class.getResource("/" + dataset.dataFileName()), dataset.typeMapping()); + var testData = loadPageFromCsv(CsvTests.class.getResource("/data/" + dataset.dataFileName()), dataset.typeMapping()); return new TestPhysicalOperationProviders(testData.v1(), testData.v2()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 5d1ff43dfe31b..674eda8916c5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; @@ -2190,6 +2191,36 @@ public void testLookupJoinUnknownField() { assertThat(e.getMessage(), containsString(errorMessage3 + "right side of join")); } + public void testMultipleLookupJoinsGiveDifferentAttributes() { + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + + // The field attributes that get contributed by different LOOKUP JOIN commands must have different name ids, + // even if they have the same names. Otherwise, dependency analysis (such as in PruneColumns) cannot work based on + // name ids, and shadowing semantics proliferate into all kinds of optimizer code.
+ + String query = "FROM test" + + "| EVAL language_code = languages" + + "| LOOKUP JOIN languages_lookup ON language_code" + + "| LOOKUP JOIN languages_lookup ON language_code"; + LogicalPlan analyzedPlan = analyze(query); + + List lookupFields = new ArrayList<>(); + List> lookupFieldNames = new ArrayList<>(); + analyzedPlan.forEachUp(EsRelation.class, esRelation -> { + if (esRelation.indexMode() == IndexMode.LOOKUP) { + lookupFields.add(esRelation.outputSet()); + lookupFieldNames.add(esRelation.outputSet().stream().map(NamedExpression::name).collect(Collectors.toSet())); + } + }); + + assertEquals(2, lookupFieldNames.size()); + assertEquals(lookupFieldNames.get(0), lookupFieldNames.get(1)); + + assertEquals(2, lookupFields.size()); + AttributeSet intersection = lookupFields.get(0).intersect(lookupFields.get(1)); + assertEquals(AttributeSet.EMPTY, intersection); + } + public void testLookupJoinIndexMode() { assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index fd2427dc8ac6a..46bebebff9c95 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -47,7 +47,6 @@ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder public static final ParseField INFERENCE_ID_FIELD = new ParseField("inference_id"); public static final ParseField INFERENCE_TEXT_FIELD = new ParseField("inference_text"); public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TextSimilarityRankBuilder.NAME, args -> { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java index 3177474ea8ca6..bf94f072b6e04 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java @@ -43,6 +43,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; +import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; /** * A version of {@link org.elasticsearch.rest.action.RestChunkedToXContentListener} that reads from a {@link Flow.Publisher} and encodes
params diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java new file mode 100644 index 0000000000000..b460c6abfeee4 --- /dev/null +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java @@ -0,0 +1,250 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.migrate.MigratePlugin; + +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; + +public class CreateIndexFromSourceActionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MigratePlugin.class, ReindexPlugin.class, MockTransportService.TestPlugin.class, DataStreamsPlugin.class); + } + + public void testDestIndexCreated() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + try { + indicesAdmin().getIndex(new GetIndexRequest().indices(destIndex)).actionGet(); + } catch (IndexNotFoundException e) { + fail(); + } + } + + public void testSettingsCopiedFromSource() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // start with a static setting + var numShards = randomIntBetween(1, 10); + var staticSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build(); + var sourceIndex = 
randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, staticSettings)).get(); + + // update with a dynamic setting + var numReplicas = randomIntBetween(0, 10); + var dynamicSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas).build(); + indicesAdmin().updateSettings(new UpdateSettingsRequest(dynamicSettings, sourceIndex)).actionGet(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + // assert both static and dynamic settings set on dest index + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(numReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + assertEquals(numShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + } + + public void testMappingsCopiedFromSource() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String mapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(mapping)).actionGet(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(sourceIndex, destIndex)).actionGet(); + Map mappings = mappingsResponse.mappings(); + var destMappings = mappings.get(destIndex).sourceAsMap(); + var sourceMappings = mappings.get(sourceIndex).sourceAsMap(); + + assertEquals(sourceMappings, destMappings); + // sanity check specific value from dest mapping + assertEquals("text", XContentMapValues.extractValue("properties.foo1.type", destMappings)); + } + + public void testSettingsOverridden() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var numShardsSource = randomIntBetween(1, 10); + var numReplicasSource = randomIntBetween(0, 10); + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + var sourceSettings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsSource) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasSource) + .build(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, sourceSettings)).get(); + + boolean overrideNumShards = randomBoolean(); + Settings settingsOverride = overrideNumShards + ? Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsSource + 1).build() + : Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasSource + 1).build(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, settingsOverride, Map.of()) + ) + ); + + // assert settings overridden + int expectedShards = overrideNumShards ? 
numShardsSource + 1 : numShardsSource; + int expectedReplicas = overrideNumShards ? numReplicasSource : numReplicasSource + 1; + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(expectedShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + assertEquals(expectedReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + } + + public void testSettingsNullOverride() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + var sourceSettings = Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true).build(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, sourceSettings)).get(); + + Settings settingsOverride = Settings.builder().putNull(IndexMetadata.SETTING_BLOCKS_WRITE).build(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, settingsOverride, Map.of()) + ) + ); + + // assert settings overridden + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertNull(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_BLOCKS_WRITE)); + } + + public void testMappingsOverridden() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String sourceMapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + }, + "foo2":{ + "type":"boolean" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(sourceMapping)).actionGet(); + + String mappingOverrideStr = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"integer" + }, + "foo3": { + "type":"keyword" + } + } + } + } + """; + var mappingOverride = XContentHelper.convertToMap(JsonXContent.jsonXContent, mappingOverrideStr, false); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, Settings.EMPTY, mappingOverride) + ) + ); + + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(destIndex)).actionGet(); + Map mappings = mappingsResponse.mappings(); + var destMappings = mappings.get(destIndex).sourceAsMap(); + + String expectedMappingStr = """ + { + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"integer" + }, + "foo2": { + "type":"boolean" + }, + "foo3": { + "type":"keyword" + } + } + } + """; + var expectedMapping = XContentHelper.convertToMap(JsonXContent.jsonXContent, expectedMappingStr, false); + assertEquals(expectedMapping, destMappings); + } +} diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java similarity index 98% rename from 
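The expected mapping asserted in testMappingsOverridden falls out of the recursive merge the transport action (later in this diff) performs with XContentHelper.update; here is a self-contained sketch of those merge semantics, with hand-built mutable maps standing in for the parsed mappings:

```java
// Recursive merge semantics: overridden leaves (foo1) win, untouched leaves (foo2)
// survive, and new leaves (foo3) are added. The source maps must be mutable because
// XContentHelper.update merges the changes into them in place.
Map<String, Object> foo1 = new HashMap<>(Map.of("type", "text"));
Map<String, Object> props = new HashMap<>();
props.put("foo1", foo1);
props.put("foo2", new HashMap<>(Map.of("type", "boolean")));
Map<String, Object> source = new HashMap<>(Map.of("properties", props));

Map<String, Object> override = Map.of(
    "properties", Map.of("foo1", Map.of("type", "integer"), "foo3", Map.of("type", "keyword"))
);

XContentHelper.update(source, override, true);
// source now maps foo1 -> integer, foo2 -> boolean, foo3 -> keyword
```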
x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java rename to x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java index e492f035da866..0ca58ecf0f0d5 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java @@ -53,7 +53,7 @@ import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; import static org.hamcrest.Matchers.equalTo; -public class ReindexDatastreamIndexIT extends ESIntegTestCase { +public class ReindexDatastreamIndexTransportActionIT extends ESIntegTestCase { private static final String MAPPING = """ { @@ -126,12 +126,14 @@ public void testDestIndexContainsDocs() throws Exception { assertHitCount(prepareSearch(response.getDestIndex()).setSize(0), numDocs); } - public void testSetSourceToReadOnly() throws Exception { + public void testSetSourceToBlockWrites() throws Exception { assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + var settings = randomBoolean() ? Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true).build() : Settings.EMPTY; + // empty source index var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); - indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, settings)).get(); // call reindex client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)).actionGet(); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index f42d05727b9fd..d9dffdefafa2c 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -34,6 +34,8 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction; import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamTransportAction; +import org.elasticsearch.xpack.migrate.action.CreateIndexFromSourceAction; +import org.elasticsearch.xpack.migrate.action.CreateIndexFromSourceTransportAction; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusTransportAction; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction; @@ -87,6 +89,7 @@ public List getRestHandlers( actions.add(new ActionHandler<>(GetMigrationReindexStatusAction.INSTANCE, GetMigrationReindexStatusTransportAction.class)); actions.add(new ActionHandler<>(CancelReindexDataStreamAction.INSTANCE, CancelReindexDataStreamTransportAction.class)); actions.add(new ActionHandler<>(ReindexDataStreamIndexAction.INSTANCE, ReindexDataStreamIndexTransportAction.class)); + actions.add(new ActionHandler<>(CreateIndexFromSourceAction.INSTANCE, CreateIndexFromSourceTransportAction.class)); } return actions; } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java 
b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java new file mode 100644 index 0000000000000..d67eaee3d251f --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class CreateIndexFromSourceAction extends ActionType { + + public static final String NAME = "indices:admin/index/create_from_source"; + + public static final ActionType INSTANCE = new CreateIndexFromSourceAction(); + + private CreateIndexFromSourceAction() { + super(NAME); + } + + public static class Request extends ActionRequest implements IndicesRequest { + + private final String sourceIndex; + private final String destIndex; + private final Settings settingsOverride; + private final Map mappingsOverride; + + public Request(String sourceIndex, String destIndex) { + this(sourceIndex, destIndex, Settings.EMPTY, Map.of()); + } + + public Request(String sourceIndex, String destIndex, Settings settingsOverride, Map mappingsOverride) { + Objects.requireNonNull(mappingsOverride); + this.sourceIndex = sourceIndex; + this.destIndex = destIndex; + this.settingsOverride = settingsOverride; + this.mappingsOverride = mappingsOverride; + } + + @SuppressWarnings("unchecked") + public Request(StreamInput in) throws IOException { + super(in); + this.sourceIndex = in.readString(); + this.destIndex = in.readString(); + this.settingsOverride = Settings.readSettingsFromStream(in); + this.mappingsOverride = (Map) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sourceIndex); + out.writeString(destIndex); + settingsOverride.writeTo(out); + out.writeGenericValue(mappingsOverride); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public String getSourceIndex() { + return sourceIndex; + } + + public String getDestIndex() { + return destIndex; + } + + public Settings getSettingsOverride() { + return settingsOverride; + } + + public Map getMappingsOverride() { + return mappingsOverride; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(sourceIndex, request.sourceIndex) + && Objects.equals(destIndex, request.destIndex) + && Objects.equals(settingsOverride, request.settingsOverride) + && Objects.equals(mappingsOverride, request.mappingsOverride); + } + + @Override + public int hashCode() { + return Objects.hash(sourceIndex, destIndex, 
settingsOverride, mappingsOverride); + } + + @Override + public String[] indices() { + return new String[] { sourceIndex }; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java new file mode 100644 index 0000000000000..968b2220628a9 --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +public class CreateIndexFromSourceTransportAction extends HandledTransportAction< + CreateIndexFromSourceAction.Request, + AcknowledgedResponse> { + private static final Logger logger = LogManager.getLogger(CreateIndexFromSourceTransportAction.class); + + private final ClusterService clusterService; + private final Client client; + private final IndexScopedSettings indexScopedSettings; + + @Inject + public CreateIndexFromSourceTransportAction( + TransportService transportService, + ClusterService clusterService, + ActionFilters actionFilters, + Client client, + IndexScopedSettings indexScopedSettings + ) { + super( + CreateIndexFromSourceAction.NAME, + false, + transportService, + actionFilters, + CreateIndexFromSourceAction.Request::new, + transportService.getThreadPool().executor(ThreadPool.Names.GENERIC) + ); + this.clusterService = clusterService; + this.client = client; + this.indexScopedSettings = indexScopedSettings; + } + + @Override + protected void doExecute(Task task, CreateIndexFromSourceAction.Request request, ActionListener listener) { + + IndexMetadata sourceIndex = 
clusterService.state().getMetadata().index(request.getSourceIndex()); + + if (sourceIndex == null) { + listener.onFailure(new IndexNotFoundException(request.getSourceIndex())); + return; + } + + logger.debug("Creating destination index [{}] for source index [{}]", request.getDestIndex(), request.getSourceIndex()); + + Settings settings = Settings.builder() + // add source settings + .put(filterSettings(sourceIndex)) + // add override settings from request + .put(request.getSettingsOverride()) + .build(); + + Map mergeMappings; + try { + mergeMappings = mergeMappings(sourceIndex.mapping(), request.getMappingsOverride()); + } catch (IOException e) { + listener.onFailure(e); + return; + } + + var createIndexRequest = new CreateIndexRequest(request.getDestIndex()).settings(settings); + if (mergeMappings.isEmpty() == false) { + createIndexRequest.mapping(mergeMappings); + } + + client.admin().indices().create(createIndexRequest, listener.map(response -> response)); + } + + private static Map toMap(@Nullable MappingMetadata sourceMapping) { + return Optional.ofNullable(sourceMapping) + .map(MappingMetadata::source) + .map(CompressedXContent::uncompressed) + .map(s -> XContentHelper.convertToMap(s, true, XContentType.JSON).v2()) + .orElse(Map.of()); + } + + private static Map mergeMappings(@Nullable MappingMetadata sourceMapping, Map mappingAddition) + throws IOException { + Map combinedMappingMap = new HashMap<>(toMap(sourceMapping)); + XContentHelper.update(combinedMappingMap, mappingAddition, true); + return combinedMappingMap; + } + + // Filter source index settings to subset of settings that can be included during reindex. + // Similar to the settings filtering done when reindexing for upgrade in Kibana + // https://github.com/elastic/kibana/blob/8a8363f02cc990732eb9cbb60cd388643a336bed/x-pack + // /plugins/upgrade_assistant/server/lib/reindexing/index_settings.ts#L155 + private Settings filterSettings(IndexMetadata sourceIndex) { + return MetadataCreateIndexService.copySettingsFromSource(false, sourceIndex.getSettings(), indexScopedSettings, Settings.builder()) + .build(); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java index 00c81fdc9fbc6..2e3fd1b76ed32 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java @@ -41,10 +41,6 @@ public Request(StreamInput in) throws IOException { this.sourceIndex = in.readString(); } - public static Request readFrom(StreamInput in) throws IOException { - return new Request(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 8863c45691c92..66b13a9ce22b0 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -10,10 +10,10 @@ import org.apache.logging.log4j.Logger; import 
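Taken together, the new action copies the filtered source settings and mappings and then layers the request overrides on top. A hedged usage sketch, with illustrative index names, field, and listener handling: create "my-dest" from "my-source" while dropping an inherited write block and retyping one field:

```java
// Hedged usage sketch of CreateIndexFromSourceAction; names are illustrative.
Settings settingsOverride = Settings.builder()
    .putNull(IndexMetadata.SETTING_BLOCKS_WRITE) // a null override removes the copied setting
    .build();
Map<String, Object> mappingsOverride = Map.of(
    "_doc", Map.of("properties", Map.of("foo1", Map.of("type", "integer")))
);
var request = new CreateIndexFromSourceAction.Request("my-source", "my-dest", settingsOverride, mappingsOverride);
client.execute(CreateIndexFromSourceAction.INSTANCE, request, ActionListener.wrap(
    acked -> logger.debug("created [{}]", request.getDestIndex()),
    e -> logger.warn("create from source failed", e)
));
```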
org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; @@ -22,9 +22,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -37,28 +35,25 @@ import java.util.Locale; import java.util.Map; -import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.READ_ONLY; +import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; public class ReindexDataStreamIndexTransportAction extends HandledTransportAction< ReindexDataStreamIndexAction.Request, ReindexDataStreamIndexAction.Response> { private static final Logger logger = LogManager.getLogger(ReindexDataStreamIndexTransportAction.class); - - private static final Set SETTINGS_TO_ADD_BACK = Set.of(IndexMetadata.SETTING_BLOCKS_WRITE, IndexMetadata.SETTING_READ_ONLY); - private static final IndicesOptions IGNORE_MISSING_OPTIONS = IndicesOptions.fromOptions(true, true, false, false); private final ClusterService clusterService; private final Client client; - private final IndexScopedSettings indexScopedSettings; @Inject public ReindexDataStreamIndexTransportAction( TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, - Client client, - IndexScopedSettings indexScopedSettings + Client client ) { super( ReindexDataStreamIndexAction.NAME, @@ -70,7 +65,6 @@ public ReindexDataStreamIndexTransportAction( ); this.clusterService = clusterService; this.client = client; - this.indexScopedSettings = indexScopedSettings; } @Override @@ -96,20 +90,19 @@ protected void doExecute( SubscribableListener.newForked(l -> setBlockWrites(sourceIndexName, l)) .andThen(l -> deleteDestIfExists(destIndexName, l)) - .andThen(l -> createIndex(sourceIndex, destIndexName, l)) + .andThen(l -> createIndex(sourceIndex, destIndexName, l)) .andThen(l -> reindex(sourceIndexName, destIndexName, l)) - .andThen(l -> updateSettings(settingsBefore, destIndexName, l)) + .andThen(l -> addBlockIfFromSource(WRITE, settingsBefore, destIndexName, l)) + .andThen(l -> addBlockIfFromSource(READ_ONLY, settingsBefore, destIndexName, l)) .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) .addListener(listener); } private void setBlockWrites(String sourceIndexName, ActionListener listener) { logger.debug("Setting write block on source index [{}]", sourceIndexName); - final 
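The rewritten doExecute above expresses the whole pipeline as a SubscribableListener chain. As a minimal sketch of that composition style, with hypothetical step names: each step starts only after the previous one succeeds, and any failure short-circuits to the listener passed to addListener:

```java
// Hypothetical three-step chain in the same style as doExecute: newForked starts the
// first async step, each andThen runs on success of the previous step, and a failure
// anywhere is delivered directly to finalListener.onFailure.
SubscribableListener.<Void>newForked(l -> stepOne(l))
    .andThen(l -> stepTwo(l))
    .andThenApply(ignored -> "pipeline complete")
    .addListener(finalListener);
```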
Settings readOnlySettings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build(); - var updateSettingsRequest = new UpdateSettingsRequest(readOnlySettings, sourceIndexName); - client.admin().indices().updateSettings(updateSettingsRequest, new ActionListener<>() { + addBlockToIndex(WRITE, sourceIndexName, new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse response) { + public void onResponse(AddIndexBlockResponse response) { if (response.isAcknowledged()) { listener.onResponse(null); } else { @@ -121,7 +114,7 @@ public void onResponse(AcknowledgedResponse response) { @Override public void onFailure(Exception e) { if (e instanceof ClusterBlockException || e.getCause() instanceof ClusterBlockException) { - // It's fine if read-only is already set + // It's fine if block-writes is already set listener.onResponse(null); } else { listener.onFailure(e); @@ -138,18 +131,23 @@ private void deleteDestIfExists(String destIndexName, ActionListener listener) { + private void createIndex(IndexMetadata sourceIndex, String destIndexName, ActionListener listener) { logger.debug("Creating destination index [{}] for source index [{}]", destIndexName, sourceIndex.getIndex().getName()); - // Create destination with subset of source index settings that can be added before reindex - var settings = getPreSettings(sourceIndex); - - var sourceMapping = sourceIndex.mapping(); - Map mapping = sourceMapping != null ? sourceMapping.rawSourceAsMap() : Map.of(); - var createIndexRequest = new CreateIndexRequest(destIndexName).settings(settings).mapping(mapping); - - var errorMessage = String.format(Locale.ROOT, "Could not create index [%s]", destIndexName); - client.admin().indices().create(createIndexRequest, failIfNotAcknowledged(listener, errorMessage)); + // override read-only settings if they exist + var removeReadOnlyOverride = Settings.builder() + .putNull(IndexMetadata.SETTING_READ_ONLY) + .putNull(IndexMetadata.SETTING_BLOCKS_WRITE) + .build(); + + var request = new CreateIndexFromSourceAction.Request( + sourceIndex.getIndex().getName(), + destIndexName, + removeReadOnlyOverride, + Map.of() + ); + var errorMessage = String.format(Locale.ROOT, "Could not create index [%s]", request.getDestIndex()); + client.execute(CreateIndexFromSourceAction.INSTANCE, request, failIfNotAcknowledged(listener, errorMessage)); } private void reindex(String sourceIndexName, String destIndexName, ActionListener listener) { @@ -162,35 +160,18 @@ private void reindex(String sourceIndexName, String destIndexName, ActionListene client.execute(ReindexAction.INSTANCE, reindexRequest, listener); } - private void updateSettings(Settings settingsBefore, String destIndexName, ActionListener listener) { - logger.debug("Adding settings from source index that could not be added before reindex"); - - Settings postSettings = getPostSettings(settingsBefore); - if (postSettings.isEmpty()) { + private void addBlockIfFromSource( + IndexMetadata.APIBlock block, + Settings settingsBefore, + String destIndexName, + ActionListener listener + ) { + if (settingsBefore.getAsBoolean(block.settingName(), false)) { + var errorMessage = String.format(Locale.ROOT, "Add [%s] block to index [%s] was not acknowledged", block.name(), destIndexName); + addBlockToIndex(block, destIndexName, failIfNotAcknowledged(listener, errorMessage)); + } else { listener.onResponse(null); - return; } - - var updateSettingsRequest = new UpdateSettingsRequest(postSettings, destIndexName); - var errorMessage = 
String.format(Locale.ROOT, "Could not update settings on index [%s]", destIndexName); - client.admin().indices().updateSettings(updateSettingsRequest, failIfNotAcknowledged(listener, errorMessage)); - } - - // Filter source index settings to subset of settings that can be included during reindex. - // Similar to the settings filtering done when reindexing for upgrade in Kibana - // https://github.com/elastic/kibana/blob/8a8363f02cc990732eb9cbb60cd388643a336bed/x-pack - // /plugins/upgrade_assistant/server/lib/reindexing/index_settings.ts#L155 - private Settings getPreSettings(IndexMetadata sourceIndex) { - // filter settings that will be added back later - var filtered = sourceIndex.getSettings().filter(settingName -> SETTINGS_TO_ADD_BACK.contains(settingName) == false); - - // filter private and non-copyable settings - var builder = MetadataCreateIndexService.copySettingsFromSource(false, filtered, indexScopedSettings, Settings.builder()); - return builder.build(); - } - - private Settings getPostSettings(Settings settingsBefore) { - return settingsBefore.filter(SETTINGS_TO_ADD_BACK::contains); } public static String generateDestIndexName(String sourceIndex) { @@ -201,11 +182,16 @@ private static ActionListener failIfNotAckno ActionListener listener, String errorMessage ) { - return listener.delegateFailureAndWrap((delegate, response) -> { + return listener.delegateFailure((delegate, response) -> { if (response.isAcknowledged()) { delegate.onResponse(null); + } else { + delegate.onFailure(new ElasticsearchException(errorMessage)); } - throw new ElasticsearchException(errorMessage); }); } + + private void addBlockToIndex(IndexMetadata.APIBlock block, String index, ActionListener listener) { + client.admin().indices().execute(TransportAddIndexBlockAction.TYPE, new AddIndexBlockRequest(block, index), listener); + } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 494be303980a7..dc8e33bc091e6 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -9,8 +9,15 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; +import org.elasticsearch.action.support.CountDownActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -20,9 +27,13 @@ import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexAction; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import 
java.util.Map; +import java.util.NoSuchElementException; import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; @@ -72,22 +83,109 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask reindexClient.execute(GetDataStreamAction.INSTANCE, request, ActionListener.wrap(response -> { List dataStreamInfos = response.getDataStreams(); if (dataStreamInfos.size() == 1) { - List indices = dataStreamInfos.getFirst().getDataStream().getIndices(); - List indicesToBeReindexed = indices.stream() - .filter(getOldIndexVersionPredicate(clusterService.state().metadata())) - .toList(); - reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); - for (Index index : indicesToBeReindexed) { - reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); - // TODO This is just a placeholder. This is where the real data stream reindex logic will go - reindexDataStreamTask.reindexSucceeded(index.getName()); + DataStream dataStream = dataStreamInfos.getFirst().getDataStream(); + if (getOldIndexVersionPredicate(clusterService.state().metadata()).test(dataStream.getWriteIndex())) { + reindexClient.execute( + RolloverAction.INSTANCE, + new RolloverRequest(sourceDataStream, null), + ActionListener.wrap( + rolloverResponse -> reindexIndices(dataStream, reindexDataStreamTask, reindexClient, sourceDataStream), + e -> completeFailedPersistentTask(reindexDataStreamTask, e) + ) + ); + } else { + reindexIndices(dataStream, reindexDataStreamTask, reindexClient, sourceDataStream); } - - completeSuccessfulPersistentTask(reindexDataStreamTask); } else { completeFailedPersistentTask(reindexDataStreamTask, new ElasticsearchException("data stream does not exist")); } - }, reindexDataStreamTask::markAsFailed)); + }, exception -> completeFailedPersistentTask(reindexDataStreamTask, exception))); + } + + private void reindexIndices( + DataStream dataStream, + ReindexDataStreamTask reindexDataStreamTask, + ExecuteWithHeadersClient reindexClient, + String sourceDataStream + ) { + List indices = dataStream.getIndices(); + List indicesToBeReindexed = indices.stream().filter(getOldIndexVersionPredicate(clusterService.state().metadata())).toList(); + reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); + // The CountDownActionListener is 1 more than the number of indices so that the count is not 0 if we have no indices + CountDownActionListener listener = new CountDownActionListener(indicesToBeReindexed.size() + 1, ActionListener.wrap(response1 -> { + completeSuccessfulPersistentTask(reindexDataStreamTask); + }, exception -> { completeFailedPersistentTask(reindexDataStreamTask, exception); })); + List indicesRemaining = Collections.synchronizedList(new ArrayList<>(indicesToBeReindexed)); + final int maxConcurrentIndices = 1; + for (int i = 0; i < maxConcurrentIndices; i++) { + maybeProcessNextIndex(indicesRemaining, reindexDataStreamTask, reindexClient, sourceDataStream, listener); + } + // This takes care of the additional latch count referenced above: + listener.onResponse(null); + } + + private void maybeProcessNextIndex( + List indicesRemaining, + ReindexDataStreamTask reindexDataStreamTask, + ExecuteWithHeadersClient reindexClient, + String sourceDataStream, + CountDownActionListener listener + ) { + if (indicesRemaining.isEmpty()) { + return; + } + Index index; + try { + index = indicesRemaining.removeFirst(); + } catch (NoSuchElementException e) { + return; + } + 
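// Illustrative sketch, not part of the diff: the loop above sizes its
// CountDownActionListener at indices + 1 so that an empty index list still completes
// (the extra count is consumed right after the worker slots are started), and a fixed
// number of slots (maxConcurrentIndices) throttles concurrency by pulling the next
// index only when one finishes. A dependency-free rendering of the same pattern with
// simplified stand-in types:
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

final class ThrottledCountDown {
    static void runAll(List<Runnable> tasks, int maxConcurrent, Runnable onAllDone) {
        Queue<Runnable> remaining = new ConcurrentLinkedQueue<>(tasks);
        AtomicInteger countDown = new AtomicInteger(tasks.size() + 1); // the "+ 1" latch
        for (int i = 0; i < maxConcurrent; i++) {
            processNext(remaining, countDown, onAllDone);
        }
        if (countDown.decrementAndGet() == 0) { // consume the extra count
            onAllDone.run();
        }
    }

    private static void processNext(Queue<Runnable> remaining, AtomicInteger countDown, Runnable onAllDone) {
        Runnable task = remaining.poll(); // poll() is the thread-safe removeFirst analogue
        if (task == null) {
            return;
        }
        task.run(); // stands in for the async reindex call; the rest is its completion callback
        if (countDown.decrementAndGet() == 0) {
            onAllDone.run();
        } else {
            processNext(remaining, countDown, onAllDone); // pull the next index on completion
        }
    }
}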
reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); + reindexClient.execute( + ReindexDataStreamIndexAction.INSTANCE, + new ReindexDataStreamIndexAction.Request(index.getName()), + ActionListener.wrap(response1 -> { + updateDataStream(sourceDataStream, index.getName(), response1.getDestIndex(), ActionListener.wrap(unused -> { + reindexDataStreamTask.reindexSucceeded(index.getName()); + listener.onResponse(null); + maybeProcessNextIndex(indicesRemaining, reindexDataStreamTask, reindexClient, sourceDataStream, listener); + }, exception -> { + reindexDataStreamTask.reindexFailed(index.getName(), exception); + listener.onResponse(null); + }), reindexClient); + }, exception -> { + reindexDataStreamTask.reindexFailed(index.getName(), exception); + listener.onResponse(null); + }) + ); + } + + private void updateDataStream( + String dataStream, + String oldIndex, + String newIndex, + ActionListener listener, + ExecuteWithHeadersClient reindexClient + ) { + reindexClient.execute( + ModifyDataStreamsAction.INSTANCE, + new ModifyDataStreamsAction.Request( + TimeValue.MAX_VALUE, + TimeValue.MAX_VALUE, + List.of(DataStreamAction.removeBackingIndex(dataStream, oldIndex), DataStreamAction.addBackingIndex(dataStream, newIndex)) + ), + new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + listener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + ); } private void completeSuccessfulPersistentTask(ReindexDataStreamTask persistentTask) { @@ -105,6 +203,9 @@ private TimeValue getTimeToLive(ReindexDataStreamTask reindexDataStreamTask) { PersistentTasksCustomMetadata.PersistentTask persistentTask = persistentTasksCustomMetadata.getTask( reindexDataStreamTask.getPersistentTaskId() ); + if (persistentTask == null) { + return TimeValue.timeValueMillis(0); + } PersistentTaskState state = persistentTask.getState(); final long completionTime; if (state == null) { diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java index acd8cd1a6add2..993db1096aac2 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java @@ -74,7 +74,7 @@ private List randomList(int minSize) { } private Set randomSet(int minSize) { - return randomSet(minSize, 100, () -> randomAlphaOfLength(50)); + return randomSet(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); } private List> randomErrorList() { diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java index 47e2d02bee3b0..ad47eb6a23cd7 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java @@ -70,7 +70,7 @@ private List randomList(int minSize) { } private Set randomSet(int minSize) { - return randomSet(minSize, 100, () -> randomAlphaOfLength(50)); + return randomSet(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); } private 
List> randomErrorList() { diff --git a/x-pack/plugin/rank-rrf/build.gradle b/x-pack/plugin/rank-rrf/build.gradle index 2c3f217243aa4..b2d470c6618ea 100644 --- a/x-pack/plugin/rank-rrf/build.gradle +++ b/x-pack/plugin/rank-rrf/build.gradle @@ -22,6 +22,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) testImplementation(testArtifact(project(':server'))) + clusterModules project(':modules:mapper-extras') clusterModules project(xpackModule('rank-rrf')) clusterModules project(xpackModule('inference')) clusterModules project(':modules:lang-painless') diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index f1171b74f7468..c1447623dd5b1 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -48,7 +48,6 @@ public final class RRFRetrieverBuilder extends CompoundRetrieverBuilder invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ) ); @@ -347,7 +347,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] longerAccessToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, longerAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, longerAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -365,7 +365,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] shorterAccessToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersions.MINIMUM_COMPATIBLE, shorterAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, shorterAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -394,8 +394,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { invalidateResponse = invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, randomBoolean()).v1() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -420,8 +420,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { ResponseException.class, () -> invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ) ); @@ -441,7 +441,7 @@ public void 
testInvalidateNotValidRefreshTokens() throws Exception { byte[] longerRefreshToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, longerRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, longerRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -459,7 +459,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] shorterRefreshToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.MINIMUM_COMPATIBLE, shorterRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, shorterRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -488,8 +488,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { invalidateResponse = invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersions.MINIMUM_COMPATIBLE, - tokenService.getRandomTokenBytes(TransportVersions.MINIMUM_COMPATIBLE, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -758,11 +758,18 @@ public void testAuthenticateWithWrongToken() throws Exception { assertAuthenticateWithToken(response.accessToken(), TEST_USER_NAME); // Now attempt to authenticate with an invalid access token string assertUnauthorizedToken(randomAlphaOfLengthBetween(0, 128)); - // Now attempt to authenticate with an invalid access token with valid structure (after 8.0 pre 8.10) + // Now attempt to authenticate with an invalid access token with valid structure (pre 7.2) assertUnauthorizedToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersions.V_8_0_0, - tokenService.getRandomTokenBytes(TransportVersions.V_8_0_0, randomBoolean()).v1() + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() + ) + ); + // Now attempt to authenticate with an invalid access token with valid structure (after 7.2 pre 8.10) + assertUnauthorizedToken( + tokenService.prependVersionAndEncodeAccessToken( + TransportVersions.V_7_4_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_4_0, randomBoolean()).v1() ) ); // Now attempt to authenticate with an invalid access token with valid structure (current version) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 900436a1fd874..4f7ba7808b823 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -48,7 +48,9 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -57,6 +59,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -90,8 +93,10 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.IOException; +import java.io.OutputStream; import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -127,6 +132,7 @@ import javax.crypto.Cipher; import javax.crypto.CipherInputStream; +import javax.crypto.CipherOutputStream; import javax.crypto.NoSuchPaddingException; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; @@ -195,8 +201,14 @@ public class TokenService { // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; + static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; + static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); public static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + static final TransportVersion VERSION_HASHED_TOKENS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_TOKENS_INDEX_INTRODUCED = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_10_X; @@ -261,7 +273,8 @@ public TokenService( /** * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with - * auto-generated values. The created tokens are stored a specific security tokens index. + * auto-generated values. The created tokens are stored in the security index for versions up to + * {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. */ public void createOAuth2Tokens( Authentication authentication, @@ -278,7 +291,8 @@ public void createOAuth2Tokens( /** * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and - * metadata. The created tokens are stored in a specific security tokens index. + * metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a + * specific security tokens index for later versions. */ // public for testing public void createOAuth2Tokens( @@ -300,15 +314,21 @@ public void createOAuth2Tokens( * * @param accessTokenBytes The predefined seed value for the access token. 
This will then be
     *                         <ul>
-    *                             <li> Hashed before stored </li>
-    *                             <li> Stored in a specific security tokens index </li>
+    *                             <li> Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Stored in a specific security tokens index for versions after
+    *                             {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
     *                             <li> Prepended with a version ID and Base64 encoded before returned to the caller of the APIs </li>
     *                         </ul>
     * @param refreshTokenBytes The predefined seed value for the refresh token. This will then be
     *                          <ul>
-    *                             <li> Hashed before stored </li>
-    *                             <li> Stored in a specific security tokens index </li>
-    *                             <li> Prepended with a version ID and Base64 encoded before returned to the caller of the APIs </li>
+    *                             <li> Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Stored in a specific security tokens index for versions after
+    *                             {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
+    *                             <li> Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs
+    *                             for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED} </li>
     *                          </ul>
* @param tokenVersion The version of the nodes with which these tokens will be compatible. * @param authentication The authentication object representing the user for which the tokens are created @@ -364,7 +384,7 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } - } else { + } else if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; userTokenId = hashTokenString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); accessTokenToStore = null; @@ -375,6 +395,18 @@ private void createOAuth2Tokens( } else { refreshTokenToStore = refreshTokenToReturn = null; } + } else { + assert accessTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; + userTokenId = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes); + accessTokenToStore = null; + if (refreshTokenBytes != null) { + assert refreshTokenBytes.length == RAW_TOKEN_BYTES_LENGTH; + refreshTokenToStore = refreshTokenToReturn = Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString( + refreshTokenBytes + ); + } else { + refreshTokenToStore = refreshTokenToReturn = null; + } } UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); tokenDocument = createTokenDocument(userToken, accessTokenToStore, refreshTokenToStore, originatingClientAuth); @@ -387,22 +419,23 @@ private void createOAuth2Tokens( final RefreshPolicy tokenCreationRefreshPolicy = tokenVersion.onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH) ? RefreshPolicy.NONE : RefreshPolicy.WAIT_UNTIL; + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); logger.debug( () -> format( "Using refresh policy [%s] when creating token doc [%s] in the security index [%s]", tokenCreationRefreshPolicy, documentId, - securityTokensIndex.aliasName() + tokensIndex.aliasName() ) ); - final IndexRequest indexTokenRequest = client.prepareIndex(securityTokensIndex.aliasName()) + final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName()) .setId(documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) .setRefreshPolicy(tokenCreationRefreshPolicy) .request(); - securityTokensIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", documentId, ex)), + tokensIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", documentId, ex)), () -> executeAsyncWithOrigin( client, SECURITY_ORIGIN, @@ -521,16 +554,17 @@ private void getTokenDocById( @Nullable String storedRefreshToken, ActionListener listener ) { - final SecurityIndexManager frozenTokensIndex = securityTokensIndex.defensiveCopy(); + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); + final SecurityIndexManager frozenTokensIndex = tokensIndex.defensiveCopy(); if (frozenTokensIndex.isAvailable(PRIMARY_SHARDS) == false) { - logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, securityTokensIndex.aliasName()); + logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); listener.onFailure(frozenTokensIndex.getUnavailableReason(PRIMARY_SHARDS)); return; } - final GetRequest getRequest = client.prepareGet(securityTokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); + final GetRequest getRequest = 
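// Illustrative sketch, not part of the diff: the restored else branch above stores the
// pre-7.2 token id as the raw URL-safe Base64 of the token bytes, whereas 7.2+
// (VERSION_HASHED_TOKENS) stores only a SHA-256 digest of that string, so a leaked
// token document is no longer a usable credential. A self-contained rendering of the
// two derivations (hashTokenString is paraphrased from the surrounding code, not copied):
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

final class TokenIds {
    static String legacyId(byte[] tokenBytes) {
        // pre-7.2: the stored document id is derived from the encoded token itself
        return Base64.getUrlEncoder().withoutPadding().encodeToString(tokenBytes);
    }

    static String hashedId(byte[] tokenBytes) throws NoSuchAlgorithmException {
        // 7.2+: only the digest of the encoded token is stored
        byte[] digest = MessageDigest.getInstance("SHA-256")
            .digest(legacyId(tokenBytes).getBytes(StandardCharsets.UTF_8));
        return Base64.getUrlEncoder().withoutPadding().encodeToString(digest);
    }
}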
client.prepareGet(tokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); final Consumer onFailure = ex -> listener.onFailure(traceLog("get token from id", tokenId, ex)); - securityTokensIndex.checkIndexVersionThenExecute( - ex -> listener.onFailure(traceLog("prepare tokens index [" + securityTokensIndex.aliasName() + "]", tokenId, ex)), + tokensIndex.checkIndexVersionThenExecute( + ex -> listener.onFailure(traceLog("prepare tokens index [" + tokensIndex.aliasName() + "]", tokenId, ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -576,11 +610,7 @@ private void getTokenDocById( // if the index or the shard is not there / available we assume that // the token is not valid if (isShardNotAvailableException(e)) { - logger.warn( - "failed to get token doc [{}] because index [{}] is not available", - tokenId, - securityTokensIndex.aliasName() - ); + logger.warn("failed to get token doc [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); } else { logger.error(() -> "failed to get token doc [" + tokenId + "]", e); } @@ -620,7 +650,7 @@ void decodeToken(String token, boolean validateUserToken, ActionListener VERSION_ACCESS_TOKENS_UUIDS cluster if (in.available() < MINIMUM_BYTES) { logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); @@ -630,6 +660,41 @@ void decodeToken(String token, boolean validateUserToken, ActionListener { + if (decodeKey != null) { + try { + final Cipher cipher = getDecryptionCipher(iv, decodeKey, version, decodedSalt); + final String tokenId = decryptTokenId(encryptedTokenId, cipher, version); + getAndValidateUserToken(tokenId, version, null, validateUserToken, listener); + } catch (IOException | GeneralSecurityException e) { + // could happen with a token that is not ours + logger.warn("invalid token", e); + listener.onResponse(null); + } + } else { + // could happen with a token that is not ours + listener.onResponse(null); + } + }, listener::onFailure)); + } else { + logger.debug(() -> format("invalid key %s key: %s", passphraseHash, keyCache.cache.keySet())); + listener.onResponse(null); + } } } catch (Exception e) { // could happen with a token that is not ours @@ -787,7 +852,11 @@ private void indexInvalidation( final Set idsOfOlderTokens = new HashSet<>(); boolean anyOlderTokensBeforeRefreshViaGet = false; for (UserToken userToken : userTokens) { - idsOfRecentTokens.add(userToken.getId()); + if (userToken.getTransportVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { + idsOfRecentTokens.add(userToken.getId()); + } else { + idsOfOlderTokens.add(userToken.getId()); + } anyOlderTokensBeforeRefreshViaGet |= userToken.getTransportVersion().before(VERSION_GET_TOKEN_DOC_FOR_REFRESH); } final RefreshPolicy tokensInvalidationRefreshPolicy = anyOlderTokensBeforeRefreshViaGet @@ -1055,7 +1124,7 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator ); getTokenDocById(userTokenId, version, null, storedRefreshToken, listener); } - } else { + } else if (version.onOrAfter(VERSION_HASHED_TOKENS)) { final String unencodedRefreshToken = in.readString(); if (unencodedRefreshToken.length() != TOKEN_LENGTH) { logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, version); @@ -1064,6 +1133,9 @@ private void findTokenFromRefreshToken(String refreshToken, Iterator final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); } + } else 
{ + logger.debug("Unrecognized refresh token version [{}].", version); + listener.onResponse(null); } } catch (IOException e) { logger.debug(() -> "Could not decode refresh token [" + refreshToken + "].", e); @@ -1178,6 +1250,7 @@ private void innerRefresh( return; } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); + final SecurityIndexManager refreshedTokenIndex = getTokensIndexForVersion(refreshTokenStatus.getTransportVersion()); if (refreshTokenStatus.isRefreshed()) { logger.debug( "Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", @@ -1185,29 +1258,31 @@ private void innerRefresh( ); final Tuple parsedTokens = parseTokensFromDocument(tokenDoc.sourceAsMap(), null); Authentication authentication = parsedTokens.v1().getAuthentication(); - decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, securityTokensIndex, authentication, listener); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, refreshedTokenIndex, authentication, listener); } else { final TransportVersion newTokenVersion = getTokenVersionCompatibility(); final Tuple newTokenBytes = getRandomTokenBytes(newTokenVersion, true); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - try { - final byte[] iv = getRandomBytes(IV_BYTES); - final byte[] salt = getRandomBytes(SALT_BYTES); - String encryptedAccessAndRefreshToken = encryptSupersedingTokens( - newTokenBytes.v1(), - newTokenBytes.v2(), - refreshToken, - iv, - salt - ); - updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); - updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); - updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); - } catch (GeneralSecurityException e) { - logger.warn("could not encrypt access token and refresh token string", e); - onFailure.accept(invalidGrantException("could not refresh the requested token")); + if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + refreshToken, + iv, + salt + ); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + onFailure.accept(invalidGrantException("could not refresh the requested token")); + } } assert tokenDoc.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert tokenDoc.primaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -1218,17 +1293,17 @@ private void innerRefresh( "Using refresh policy [%s] when updating token doc [%s] for refresh in the security index [%s]", tokenRefreshUpdateRefreshPolicy, tokenDoc.id(), - securityTokensIndex.aliasName() + refreshedTokenIndex.aliasName() ) ); - final UpdateRequestBuilder updateRequest = client.prepareUpdate(securityTokensIndex.aliasName(), tokenDoc.id()) + final UpdateRequestBuilder 
updateRequest = client.prepareUpdate(refreshedTokenIndex.aliasName(), tokenDoc.id()) .setDoc("refresh_token", updateMap) .setFetchSource(logger.isDebugEnabled()) .setRefreshPolicy(tokenRefreshUpdateRefreshPolicy) .setIfSeqNo(tokenDoc.seqNo()) .setIfPrimaryTerm(tokenDoc.primaryTerm()); - securityTokensIndex.prepareIndexIfNeededThenExecute( - ex -> listener.onFailure(traceLog("prepare index [" + securityTokensIndex.aliasName() + "]", ex)), + refreshedTokenIndex.prepareIndexIfNeededThenExecute( + ex -> listener.onFailure(traceLog("prepare index [" + refreshedTokenIndex.aliasName() + "]", ex)), () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, @@ -1274,7 +1349,7 @@ private void innerRefresh( if (cause instanceof VersionConflictEngineException) { // The document has been updated by another thread, get it again. logger.debug("version conflict while updating document [{}], attempting to get it again", tokenDoc.id()); - getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, new ActionListener<>() { + getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, new ActionListener<>() { @Override public void onResponse(GetResponse response) { if (response.isExists()) { @@ -1293,7 +1368,7 @@ public void onFailure(Exception e) { logger.info("could not get token document [{}] for refresh, retrying", tokenDoc.id()); client.threadPool() .schedule( - () -> getTokenDocAsync(tokenDoc.id(), securityTokensIndex, true, this), + () -> getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, this), backoff.next(), client.threadPool().generic() ); @@ -1614,13 +1689,17 @@ private static Optional checkMultipleRefreshes( RefreshTokenStatus refreshTokenStatus ) { if (refreshTokenStatus.isRefreshed()) { - if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { - return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); - } - if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { - return Optional.of( - invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") - ); + if (refreshTokenStatus.getTransportVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); + } + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + return Optional.of( + invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great") + ); + } + } else { + return Optional.of(invalidGrantException("token has already been refreshed")); } } return Optional.empty(); @@ -1900,6 +1979,21 @@ private void ensureEnabled() { } } + /** + * In version {@code #VERSION_TOKENS_INDEX_INTRODUCED} security tokens were moved into a separate index, away from the other entities in + * the main security index, due to their ephemeral nature. They moved "seamlessly" - without manual user intervention. In this way, new + * tokens are created in the new index, while the existing ones were left in place - to be accessed from the old index - and due to be + * removed automatically by the {@code ExpiredTokenRemover} periodic job. Therefore, in general, when searching for a token we need to + * consider both the new and the old indices. 
+ */ + private SecurityIndexManager getTokensIndexForVersion(TransportVersion version) { + if (version.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { + return securityTokensIndex; + } else { + return securityMainIndex; + } + } + public TimeValue getExpirationDelay() { return expirationDelay; } @@ -1928,13 +2022,41 @@ public String prependVersionAndEncodeAccessToken(TransportVersion version, byte[ out.writeByteArray(accessTokenBytes); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } - } else { + } else if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { try (BytesStreamOutput out = new BytesStreamOutput(MINIMUM_BASE64_BYTES)) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); out.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); return Base64.getEncoder().encodeToString(out.bytes().toBytesRef().bytes); } + } else { + // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly + try ( + ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); + OutputStream base64 = Base64.getEncoder().wrap(os); + StreamOutput out = new OutputStreamStreamOutput(base64) + ) { + out.setTransportVersion(version); + KeyAndCache keyAndCache = keyCache.activeKeyCache; + TransportVersion.writeVersion(version, out); + out.writeByteArray(keyAndCache.getSalt().bytes); + out.writeByteArray(keyAndCache.getKeyHash().bytes); + final byte[] initializationVector = getRandomBytes(IV_BYTES); + out.writeByteArray(initializationVector); + try ( + CipherOutputStream encryptedOutput = new CipherOutputStream( + out, + getEncryptionCipher(initializationVector, keyAndCache, version) + ); + StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput) + ) { + encryptedStreamOutput.setTransportVersion(version); + encryptedStreamOutput.writeString(Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(accessTokenBytes)); + // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream + encryptedStreamOutput.close(); + return new String(os.toByteArray(), StandardCharsets.UTF_8); + } + } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 702af75141093..75c2507a1dc5f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -126,6 +126,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -147,6 +148,7 @@ public class TokenServiceTests extends ESTestCase { private SecurityIndexManager securityMainIndex; private SecurityIndexManager securityTokensIndex; private ClusterService clusterService; + private DiscoveryNode pre72OldNode; private DiscoveryNode pre8500040OldNode; private Settings tokenServiceEnabledSettings = Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) @@ -226,12 +228,31 @@ public void setupClient() { licenseState = mock(MockLicenseState.class); 
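// Illustrative sketch, not part of the diff: the legacy branch of
// prependVersionAndEncodeAccessToken above frames a pre-7.2 token as one Base64 stream
// containing the version, the active key's salt and hash, a random IV, and finally the
// cipher-encrypted token. A rough, self-contained rendering of that framing; the key
// material is a placeholder, and "AES/GCM/NoPadding" plus the 4-byte length prefixes
// are assumptions for illustration (the real code uses StreamOutput#writeByteArray and
// the token service's configured cipher):
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.util.Base64;
import javax.crypto.Cipher;
import javax.crypto.CipherOutputStream;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;

final class LegacyTokenFraming {
    // aesKey must be 16, 24, or 32 bytes; salt and keyHash identify which
    // passphrase-derived key was used, so a decoder can locate it again
    static String encode(int version, byte[] salt, byte[] keyHash, byte[] aesKey, String token) throws Exception {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        OutputStream b64 = Base64.getEncoder().wrap(os);
        DataOutputStream header = new DataOutputStream(b64);
        header.writeInt(version);       // stand-in for TransportVersion.writeVersion
        header.writeInt(salt.length);   // length-prefixed arrays, like writeByteArray
        header.write(salt);
        header.writeInt(keyHash.length);
        header.write(keyHash);
        byte[] iv = new byte[12];       // IV_BYTES is 12 in the code above
        new SecureRandom().nextBytes(iv);
        header.writeInt(iv.length);
        header.write(iv);
        header.flush(); // header bytes are Base64-encoded but not encrypted
        Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(aesKey, "AES"), new GCMParameterSpec(128, iv));
        try (OutputStream encrypted = new CipherOutputStream(b64, cipher)) {
            encrypted.write(token.getBytes(StandardCharsets.UTF_8));
        } // closing finishes the cipher and flushes the final Base64 quantum
        return os.toString(StandardCharsets.UTF_8);
    }
}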
when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true); + if (randomBoolean()) { + // version 7.2 was an "inflection" point in the Token Service development (access_tokens as UUIDS, multiple concurrent + // refreshes, + // tokens docs on a separate index) + pre72OldNode = addAnother7071DataNode(this.clusterService); + } if (randomBoolean()) { // before refresh tokens used GET, i.e. TokenService#VERSION_GET_TOKEN_DOC_FOR_REFRESH pre8500040OldNode = addAnotherPre8500DataNode(this.clusterService); } } + private static DiscoveryNode addAnother7071DataNode(ClusterService clusterService) { + Version version; + TransportVersion transportVersion; + if (randomBoolean()) { + version = Version.V_7_0_0; + transportVersion = TransportVersions.V_7_0_0; + } else { + version = Version.V_7_1_0; + transportVersion = TransportVersions.V_7_1_0; + } + return addAnotherDataNodeWithVersion(clusterService, version, transportVersion); + } + private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterService) { Version version; TransportVersion transportVersion; @@ -280,6 +301,53 @@ public static void shutdownThreadpool() { threadPool = null; } + public void testAttachAndGetToken() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Token Service Key is used (to encrypt tokens) + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + final String accessToken = tokenFuture.get().getAccessToken(); + assertNotNull(accessToken); + mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + UserToken serialized = future.get(); + assertAuthentication(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + // verify a second separate token service with its own salt can also verify + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + anotherService.refreshMetadata(tokenService.getTokenMetadata()); + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + anotherService.tryAuthenticateToken(bearerToken, future); + UserToken fromOtherService = future.get(); + assertAuthentication(authentication, fromOtherService.getAuthentication()); + } + } + public void 
testInvalidAuthorizationHeader() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); @@ -296,6 +364,89 @@ public void testInvalidAuthorizationHeader() throws Exception { } } + public void testPassphraseWorks() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + final String accessToken = tokenFuture.get().getAccessToken(); + assertNotNull(accessToken); + mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + storeTokenHeader(requestContext, accessToken); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + UserToken serialized = future.get(); + assertAuthentication(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + // verify a second separate token service with its own passphrase cannot verify + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + anotherService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + + public void testGetTokenWhenKeyCacheHasExpired() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == pre72OldNode) { + pre72OldNode = addAnother7071DataNode(this.clusterService); + } + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + + PlainActionFuture tokenFuture = new PlainActionFuture<>(); + Tuple newTokenBytes = tokenService.getRandomTokenBytes(randomBoolean()); + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + String accessToken = tokenFuture.get().getAccessToken(); + assertThat(accessToken, notNullValue()); + + tokenService.clearActiveKeyCache(); + + tokenService.createOAuth2Tokens( + newTokenBytes.v1(), + newTokenBytes.v2(), + authentication, + authentication, + Collections.emptyMap(), + tokenFuture + ); + 
accessToken = tokenFuture.get().getAccessToken(); + assertThat(accessToken, notNullValue()); + } + public void testAuthnWithInvalidatedToken() throws Exception { when(securityMainIndex.indexExists()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); @@ -669,6 +820,57 @@ public void testMalformedRefreshTokens() throws Exception { } } + public void testNonExistingPre72Token() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // mock another random token so that we don't find a token in TokenService#getUserTokenFromId + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null); + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + storeTokenHeader( + requestContext, + tokenService.prependVersionAndEncodeAccessToken( + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() + ) + ); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + + public void testNonExistingUUIDToken() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // mock another random token so that we don't find a token in TokenService#getUserTokenFromId + Authentication authentication = AuthenticationTestHelper.builder() + .user(new User("joe", "admin")) + .realmRef(new RealmRef("native_realm", "native", "node1")) + .build(false); + mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null); + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + TransportVersion uuidTokenVersion = randomFrom(TransportVersions.V_7_2_0, TransportVersions.V_7_3_2); + storeTokenHeader( + requestContext, + tokenService.prependVersionAndEncodeAccessToken( + uuidTokenVersion, + tokenService.getRandomTokenBytes(uuidTokenVersion, randomBoolean()).v1() + ) + ); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { + PlainActionFuture future = new PlainActionFuture<>(); + final SecureString bearerToken = Authenticator.extractBearerTokenFromHeader(requestContext); + tokenService.tryAuthenticateToken(bearerToken, future); + assertNull(future.get()); + } + } + public void testNonExistingLatestTokenVersion() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // mock another random token so that we don't find a token in TokenService#getUserTokenFromId @@ -723,11 +925,18 @@ public void testIndexNotAvailable() throws Exception { return Void.TYPE; }).when(client).get(any(GetRequest.class), anyActionListener()); - final SecurityIndexManager tokensIndex = securityTokensIndex; - when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); - when(securityMainIndex.indexExists()).thenReturn(false); - 
when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex); - + final SecurityIndexManager tokensIndex; + if (pre72OldNode != null) { + tokensIndex = securityMainIndex; + when(securityTokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(securityTokensIndex.indexExists()).thenReturn(false); + when(securityTokensIndex.defensiveCopy()).thenReturn(securityTokensIndex); + } else { + tokensIndex = securityTokensIndex; + when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(securityMainIndex.indexExists()).thenReturn(false); + when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex); + } try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { PlainActionFuture future = new PlainActionFuture<>(); final SecureString bearerToken3 = Authenticator.extractBearerTokenFromHeader(requestContext); @@ -779,6 +988,7 @@ public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { } public void testSupersedingTokenEncryption() throws Exception { + assumeTrue("Superseding tokens are only created in post 7.2 clusters", pre72OldNode == null); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = AuthenticationTests.randomAuthentication(null, null); PlainActionFuture tokenFuture = new PlainActionFuture<>(); @@ -813,11 +1023,13 @@ public void testSupersedingTokenEncryption() throws Exception { authentication, tokenFuture ); - - assertThat( - tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()), - equalTo(tokenFuture.get().getAccessToken()) - ); + if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs) + assertThat( + tokenService.prependVersionAndEncodeAccessToken(version, newTokenBytes.v1()), + equalTo(tokenFuture.get().getAccessToken()) + ); + } assertThat( TokenService.prependVersionAndEncodeRefreshToken(version, newTokenBytes.v2()), equalTo(tokenFuture.get().getRefreshToken()) @@ -946,8 +1158,10 @@ public static String tokenDocIdFromAccessTokenBytes(byte[] accessTokenBytes, Tra MessageDigest userTokenIdDigest = sha256(); userTokenIdDigest.update(accessTokenBytes, RAW_TOKEN_BYTES_LENGTH, RAW_TOKEN_DOC_ID_BYTES_LENGTH); return Base64.getUrlEncoder().withoutPadding().encodeToString(userTokenIdDigest.digest()); - } else { + } else if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { return TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes)); + } else { + return Base64.getUrlEncoder().withoutPadding().encodeToString(accessTokenBytes); } } @@ -964,9 +1178,12 @@ private void mockTokenForRefreshToken( if (userToken.getTransportVersion().onOrAfter(VERSION_GET_TOKEN_DOC_FOR_REFRESH)) { storedAccessToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(accessTokenBytes)); storedRefreshToken = Base64.getUrlEncoder().withoutPadding().encodeToString(sha256().digest(refreshTokenBytes)); - } else { + } else if (userToken.getTransportVersion().onOrAfter(TokenService.VERSION_HASHED_TOKENS)) { storedAccessToken = null; storedRefreshToken = TokenService.hashTokenString(Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes)); + } else { + storedAccessToken = null; + storedRefreshToken = 
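// Illustrative sketch, not part of the diff: the version guard added above skips the
// re-encode equality assertion for pre-UUID tokens because the legacy encrypted format
// draws a fresh random IV per encoding, so identical input never produces identical
// ciphertext twice. A self-contained demonstration of that property (AES/GCM chosen
// here for illustration):
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import javax.crypto.spec.GCMParameterSpec;

final class NonDeterministicEncoding {
    public static void main(String[] args) throws Exception {
        KeyGenerator keyGen = KeyGenerator.getInstance("AES");
        keyGen.init(128);
        SecretKey key = keyGen.generateKey();
        byte[] plaintext = "same token bytes".getBytes(StandardCharsets.UTF_8);
        // two encodings of identical input differ because each draws its own IV
        System.out.println(Arrays.equals(encryptWithFreshIv(key, plaintext), encryptWithFreshIv(key, plaintext))); // false
    }

    private static byte[] encryptWithFreshIv(SecretKey key, byte[] plaintext) throws Exception {
        byte[] iv = new byte[12];
        new SecureRandom().nextBytes(iv);
        Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
        return cipher.doFinal(plaintext);
    }
}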
Base64.getUrlEncoder().withoutPadding().encodeToString(refreshTokenBytes); } final RealmRef realmRef = new RealmRef( refreshTokenStatus == null ? randomAlphaOfLength(6) : refreshTokenStatus.getAssociatedRealm(), diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle index 81eb82a522389..d4615260952d1 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle @@ -10,7 +10,8 @@ apply plugin: 'elasticsearch.rest-resources' dependencies { javaRestTestImplementation testArtifact(project(xpackModule('snapshot-repo-test-kit'))) - javaRestTestImplementation project(path: ':test:fixtures:hdfs-fixture', configuration:"shadow") + javaRestTestCompileOnly project(path: ':test:fixtures:hdfs-fixture') + javaRestTestRuntimeOnly project(path: ':test:fixtures:hdfs-fixture', configuration:"shadow") javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') javaRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" javaRestTestImplementation "org.slf4j:slf4j-simple:${versions.slf4j}" diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java index 83fbd7262461d..eb4515a897118 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeIT.java @@ -36,7 +36,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java index 3d91fb443aabd..4a6fa5d545bef 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryGeoShapeWithDocValuesIT.java @@ -40,6 +40,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java index b4d7a472591bd..0857b078be579 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoShapeWithDocValuesIT.java @@ -60,7 +60,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return 
IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 4b13a7bf1f829..58fde288cfc60 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownBinary; @@ -280,8 +279,6 @@ public void testInvalidCurrentVersion() { ); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public void testGeoShapeLegacyMerge() throws Exception { IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); MapperService m = createMapperService(version, fieldMapping(b -> b.field("type", getFieldName()))); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java index d030a2bbf81ad..5d2624735bebe 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java @@ -113,8 +113,11 @@ public void testDefaultConfiguration() throws IOException { } public void testDefaultDocValueConfigurationOnPre8_4() throws IOException { - // TODO verify which version this test is actually valid for (when PR is actually merged) - IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_3_0); + IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersions.V_8_3_0 + ); DocumentMapper defaultMapper = createDocumentMapper(oldVersion, fieldMapping(this::minimalMapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper(FIELD_NAME); assertThat(fieldMapper, instanceOf(fieldMapperClass())); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml index cbaac4e47fdd4..fdb6746bbeed8 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml @@ -1,7 +1,7 @@ --- setup: - requires: - test_runner_features: [capabilities] + test_runner_features: [capabilities, contains] capabilities: - method: POST path: /_query @@ -75,4 +75,4 @@ non-lookup index: catch: "bad_request" - match: { error.type: "verification_exception" } - - match: { error.reason: "Found 1 problem\nline 1:43: invalid 
+  - contains: { error.reason: "Found 1 problem\nline 1:43: invalid [test] resolution in lookup mode to an index in [standard] mode" }
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java
index 40ad5bba29baa..58556dd420ca6 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java
@@ -11,15 +11,23 @@
 import org.elasticsearch.client.Response;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
+import org.elasticsearch.common.time.DateFormatter;
+import org.elasticsearch.common.time.FormatNames;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.Strings;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.time.Instant;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.upgrades.IndexingIT.assertCount;
+import static org.hamcrest.Matchers.equalTo;
 
 public class DataStreamsUpgradeIT extends AbstractUpgradeTestCase {
@@ -164,4 +172,152 @@ public void testDataStreamValidationDoesNotBreakUpgrade() throws Exception {
         }
     }
 
+    public void testUpgradeDataStream() throws Exception {
+        String dataStreamName = "reindex_test_data_stream";
+        int numRollovers = 5;
+        if (CLUSTER_TYPE == ClusterType.OLD) {
+            createAndRolloverDataStream(dataStreamName, numRollovers);
+        } else if (CLUSTER_TYPE == ClusterType.UPGRADED) {
+            upgradeDataStream(dataStreamName, numRollovers);
+        }
+    }
+
+    private static void createAndRolloverDataStream(String dataStreamName, int numRollovers) throws IOException {
+        // We want to create a data stream and roll it over several times so that we have several indices to upgrade
+        final String template = """
+            {
+                "settings":{
+                    "index": {
+                        "mode": "time_series"
+                    }
+                },
+                "mappings":{
+                    "dynamic_templates": [
+                        {
+                            "labels": {
+                                "path_match": "pod.labels.*",
+                                "mapping": {
+                                    "type": "keyword",
+                                    "time_series_dimension": true
+                                }
+                            }
+                        }
+                    ],
+                    "properties": {
+                        "@timestamp" : {
+                            "type": "date"
+                        },
+                        "metricset": {
+                            "type": "keyword",
+                            "time_series_dimension": true
+                        },
+                        "k8s": {
+                            "properties": {
+                                "pod": {
+                                    "properties": {
+                                        "name": {
+                                            "type": "keyword"
+                                        },
+                                        "network": {
+                                            "properties": {
+                                                "tx": {
+                                                    "type": "long"
+                                                },
+                                                "rx": {
+                                                    "type": "long"
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            """;
+        final String indexTemplate = """
+            {
+                "index_patterns": ["$PATTERN"],
+                "template": $TEMPLATE,
+                "data_stream": {
+                }
+            }""";
+        var putIndexTemplateRequest = new Request("POST", "/_index_template/reindex_test_data_stream_template");
+        putIndexTemplateRequest.setJsonEntity(indexTemplate.replace("$TEMPLATE", template).replace("$PATTERN", dataStreamName));
+        assertOK(client().performRequest(putIndexTemplateRequest));
+        bulkLoadData(dataStreamName);
+        for (int i = 0; i < numRollovers; i++) {
+            rollover(dataStreamName);
+            bulkLoadData(dataStreamName);
+        }
+    }
+
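+    // Starts a migration reindex in "upgrade" mode for the data stream, polls the _status endpoint until it reports
+    // complete (expecting one reindexed index per rollover plus the write index, unless the cluster never changed
+    // version), then cancels the finished task, presumably to clean it up.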
+    private void upgradeDataStream(String dataStreamName, int numRollovers) throws Exception {
+        Request reindexRequest = new Request("POST", "/_migration/reindex");
+        reindexRequest.setJsonEntity(Strings.format("""
+            {
+              "mode": "upgrade",
+              "source": {
+                "index": "%s"
+              }
+            }""", dataStreamName));
+        Response reindexResponse = client().performRequest(reindexRequest);
+        assertOK(reindexResponse);
+        assertBusy(() -> {
+            Request statusRequest = new Request("GET", "_migration/reindex/" + dataStreamName + "/_status");
+            Response statusResponse = client().performRequest(statusRequest);
+            Map<String, Object> statusResponseMap = XContentHelper.convertToMap(
+                JsonXContent.jsonXContent,
+                statusResponse.getEntity().getContent(),
+                false
+            );
+            assertOK(statusResponse);
+            assertThat(statusResponseMap.get("complete"), equalTo(true));
+            if (isOriginalClusterCurrent()) {
+                // If the original cluster was the same as this one, we don't want any indices reindexed:
+                assertThat(statusResponseMap.get("successes"), equalTo(0));
+            } else {
+                assertThat(statusResponseMap.get("successes"), equalTo(numRollovers + 1));
+            }
+        }, 60, TimeUnit.SECONDS);
+        Request cancelRequest = new Request("POST", "_migration/reindex/" + dataStreamName + "/_cancel");
+        Response cancelResponse = client().performRequest(cancelRequest);
+        assertOK(cancelResponse);
+    }
+
+    private static void bulkLoadData(String dataStreamName) throws IOException {
+        final String bulk = """
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "network": {"tx": 2001818691, "rx": 802133794}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "network": {"tx": 2005177954, "rx": 801479970}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", "network": {"tx": 2006223737, "rx": 802337279}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "rat", "network": {"tx": 2012916202, "rx": 803685721}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "network": {"tx": 1434521831, "rx": 530575198}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "network": {"tx": 1434577921, "rx": 530600088}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "network": {"tx": 1434587694, "rx": 530604797}}}}
+            {"create": {}}
+            {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "network": {"tx": 1434595272, "rx": 530605511}}}}
+            """;
+        var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk");
+        bulkRequest.setJsonEntity(bulk.replace("$now", formatInstant(Instant.now())));
+        var response = client().performRequest(bulkRequest);
+        assertOK(response);
+    }
+
+    static String formatInstant(Instant instant) {
+        return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant);
+    }
+
+    private static void rollover(String dataStreamName) throws IOException {
+        Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover");
+        Response rolloverResponse = client().performRequest(rolloverRequest);
+        assertOK(rolloverResponse);
+    }
 }