diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 66b989d94455c..f530f237113a9 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 8103b40cbaff0..b1f05ea23da4c 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -41,6 +41,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index a5d3c4e5f7935..c76c54a56494e 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true 
-Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index f454d20fc542e..d8c5d55fc7e4f 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -48,6 +48,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -72,6 +73,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: aws diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index b102208dd7cce..fda4315926b6b 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -50,6 +50,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -92,6 +93,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3748f4941420e..fa37d37d9de9a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -391,6 +391,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -433,6 +434,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/pull-request/part-5-arm.yml b/.buildkite/pipelines/pull-request/part-5-arm.yml new file mode 100644 index 0000000000000..7bc3a6157155b --- /dev/null +++ 
b/.buildkite/pipelines/pull-request/part-5-arm.yml @@ -0,0 +1,13 @@ +config: + allow-labels: "test-arm" +steps: + - label: part-5-arm + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart5 + timeout_in_minutes: 300 + agents: + provider: aws + imagePrefix: elasticsearch-ubuntu-2004-aarch64 + instanceType: m6g.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 diff --git a/.buildkite/pipelines/pull-request/part-5-fips.yml b/.buildkite/pipelines/pull-request/part-5-fips.yml new file mode 100644 index 0000000000000..4e193ac751086 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-fips.yml @@ -0,0 +1,11 @@ +config: + allow-labels: "Team:Security" +steps: + - label: part-5-fips + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-5-windows.yml b/.buildkite/pipelines/pull-request/part-5-windows.yml new file mode 100644 index 0000000000000..4e16a8ef73238 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-windows.yml @@ -0,0 +1,14 @@ +config: + allow-labels: "test-windows" +steps: + - label: part-5-windows + command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-windows-2022 + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: checkPart5 diff --git a/.buildkite/pipelines/pull-request/part-5.yml b/.buildkite/pipelines/pull-request/part-5.yml new file mode 100644 index 0000000000000..306ce7533d0ed --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5.yml @@ -0,0 +1,11 @@ +config: + skip-target-branches: "7.17" +steps: + - label: part-5 + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart5 + timeout_in_minutes: 300 
+ agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/build.gradle b/build.gradle index 16c6fce28fe4b..1d9757f32543d 100644 --- a/build.gradle +++ b/build.gradle @@ -287,6 +287,8 @@ allprojects { tasks.register('checkPart4') { dependsOn 'check' } } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } + } else if (project.path.contains("multi-node")) { + tasks.register('checkPart5') { dependsOn 'check' } } else { tasks.register('checkPart2') { dependsOn 'check' } } diff --git a/docs/changelog/101373.yaml b/docs/changelog/101373.yaml new file mode 100644 index 0000000000000..53b5680301c79 --- /dev/null +++ b/docs/changelog/101373.yaml @@ -0,0 +1,6 @@ +pr: 101373 +summary: Adding aggregations support for the `_ignored` field +area: Search +type: feature +issues: + - 59946 diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml new file mode 100644 index 0000000000000..2ad5aa970c214 --- /dev/null +++ b/docs/changelog/105792.yaml @@ -0,0 +1,18 @@ +pr: 105792 +summary: "Change `skip_unavailable` remote cluster setting default value to true" +area: Search +type: breaking +issues: [] +breaking: + title: "Change `skip_unavailable` remote cluster setting default value to true" + area: Cluster and node setting + details: The default value of the `skip_unavailable` setting is now set to true. + All existing and future remote clusters that do not define this setting will use the new default. + This setting only affects cross-cluster searches using the _search or _async_search API. + impact: Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless + skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API. 
+ Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked + as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a + search returns a failure when a particular remote cluster is not available, `skip_unavailable` must now be + set explicitly. + notable: false diff --git a/docs/changelog/107481.yaml b/docs/changelog/107481.yaml new file mode 100644 index 0000000000000..9e65b457c9ed6 --- /dev/null +++ b/docs/changelog/107481.yaml @@ -0,0 +1,5 @@ +pr: 107481 +summary: Block specific config files from being read after startup +area: Security +type: bug +issues: [] diff --git a/docs/changelog/107779.yaml b/docs/changelog/107779.yaml new file mode 100644 index 0000000000000..a41c19a2329e0 --- /dev/null +++ b/docs/changelog/107779.yaml @@ -0,0 +1,6 @@ +pr: 107779 +summary: Allow rescorer with field collapsing +area: Search +type: enhancement +issues: + - 27243 \ No newline at end of file diff --git a/docs/changelog/107813.yaml b/docs/changelog/107813.yaml new file mode 100644 index 0000000000000..1cbb518a8be5b --- /dev/null +++ b/docs/changelog/107813.yaml @@ -0,0 +1,6 @@ +pr: 107813 +summary: Increase size of big arrays only when there is an actual value in the aggregators + (Analytics module) +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/107877.yaml b/docs/changelog/107877.yaml new file mode 100644 index 0000000000000..cf458b3aa3a42 --- /dev/null +++ b/docs/changelog/107877.yaml @@ -0,0 +1,5 @@ +pr: 107877 +summary: Support metrics counter types in ESQL +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/changelog/107947.yaml b/docs/changelog/107947.yaml new file mode 100644 index 0000000000000..637ac3c005779 --- /dev/null +++ b/docs/changelog/107947.yaml @@ -0,0 +1,6 @@ +pr: 107947 +summary: "ESQL: Fix equals `hashCode` for functions" +area: ES|QL +type: bug +issues: + - 104393 diff --git 
a/docs/changelog/107969.yaml b/docs/changelog/107969.yaml new file mode 100644 index 0000000000000..ed63513d8d57d --- /dev/null +++ b/docs/changelog/107969.yaml @@ -0,0 +1,5 @@ +pr: 107969 +summary: Disable PIT for remote clusters +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/107990.yaml b/docs/changelog/107990.yaml new file mode 100644 index 0000000000000..80cb96aca4426 --- /dev/null +++ b/docs/changelog/107990.yaml @@ -0,0 +1,5 @@ +pr: 107990 +summary: Optimise `time_series` aggregation for single value fields +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/108016.yaml b/docs/changelog/108016.yaml new file mode 100644 index 0000000000000..0aa3f86a6f859 --- /dev/null +++ b/docs/changelog/108016.yaml @@ -0,0 +1,5 @@ +pr: 108016 +summary: Optimise `BinaryRangeAggregator` for single value fields +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index d30cd43a4db5e..a9fe8be93d018 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -147,7 +147,7 @@ cluster with cluster alias `leader`. 
"num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, + "skip_unavailable" : true, "mode" : "sniff" } } diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index f66f737b7d4b5..e1bba0529d7db 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -108,7 +108,6 @@ include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg] |=== include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg-result] |=== - `BUCKET` may be used in both the aggregating and grouping part of the <> command provided that in the aggregating part the function is referenced by an alias defined in the @@ -121,3 +120,4 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression] |=== include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result] |=== + diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 986c0e8f91d33..7141ca4c27443 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -943,6 +943,7 @@ "FROM employees\n| STATS COUNT(*) by bs = BUCKET(salary, 20, 25324, 74999)\n| SORT bs", "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b", "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())", - "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket" + "FROM employees\n| WHERE hire_date 
>= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket", + "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2" ] } diff --git a/docs/reference/esql/functions/kibana/definition/to_double.json b/docs/reference/esql/functions/kibana/definition/to_double.json index 4a466e76562e9..f4e414068db61 100644 --- a/docs/reference/esql/functions/kibana/definition/to_double.json +++ b/docs/reference/esql/functions/kibana/definition/to_double.json @@ -16,6 +16,42 @@ "variadic" : false, "returnType" : "double" }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_double", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_integer", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_long", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/to_integer.json b/docs/reference/esql/functions/kibana/definition/to_integer.json index 4284265c4f93c..2776d8b29c412 100644 --- a/docs/reference/esql/functions/kibana/definition/to_integer.json +++ b/docs/reference/esql/functions/kibana/definition/to_integer.json @@ -16,6 +16,18 @@ "variadic" : false, "returnType" : "integer" }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_integer", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "integer" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/to_long.json b/docs/reference/esql/functions/kibana/definition/to_long.json index 25e7f82f18547..e3218eba9642a 100644 --- a/docs/reference/esql/functions/kibana/definition/to_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_long.json @@ -16,6 +16,30 @@ "variadic" : false, "returnType" : "long" }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_integer", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "counter_long", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/types/to_double.asciidoc b/docs/reference/esql/functions/types/to_double.asciidoc index c78c3974af5a5..cff686c7bc4ca 100644 --- a/docs/reference/esql/functions/types/to_double.asciidoc +++ b/docs/reference/esql/functions/types/to_double.asciidoc @@ -6,6 +6,9 @@ |=== field | result boolean | double +counter_double | double +counter_integer | double +counter_long | double datetime | double double | double integer | double diff --git a/docs/reference/esql/functions/types/to_integer.asciidoc b/docs/reference/esql/functions/types/to_integer.asciidoc index 11fd7914c5b0f..974f3c9c82d88 100644 --- a/docs/reference/esql/functions/types/to_integer.asciidoc +++ b/docs/reference/esql/functions/types/to_integer.asciidoc @@ -6,6 +6,7 @@ |=== field | result boolean | integer +counter_integer | integer datetime | integer double | integer integer | integer diff --git a/docs/reference/esql/functions/types/to_long.asciidoc b/docs/reference/esql/functions/types/to_long.asciidoc index 4bc927fd94697..b3959c5444e34 100644 --- a/docs/reference/esql/functions/types/to_long.asciidoc +++ b/docs/reference/esql/functions/types/to_long.asciidoc @@ -6,6 +6,8 @@ |=== field | result boolean | long +counter_integer | long +counter_long | long datetime | long double | long integer | long diff --git a/docs/reference/mapping/fields/ignored-field.asciidoc b/docs/reference/mapping/fields/ignored-field.asciidoc index 5fd6c478438ab..48f8626c5ab0b 100644 --- a/docs/reference/mapping/fields/ignored-field.asciidoc +++ b/docs/reference/mapping/fields/ignored-field.asciidoc @@ -43,3 +43,20 @@ GET _search } } -------------------------------------------------- + +Since 8.15.0, the `_ignored` field supports aggregations as well. 
+For example, the below query finds all fields that got ignored: + +[source,console] +-------------------------------------------------- +GET _search +{ + "aggs": { + "ignored_fields": { + "terms": { + "field": "_ignored" + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc index 7fb345660e086..5344cb97465d7 100644 --- a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc @@ -37,7 +37,7 @@ clusters on individual nodes in the local cluster, define static settings in `elasticsearch.yml` for each node. The following request adds a remote cluster with an alias of `cluster_one`. This -_cluster alias_ is a unique identifier that represents the connection to the +_cluster alias_ is a unique identifier that represents the connection to the remote cluster and is used to distinguish between local and remote indices. [source,console,subs=attributes+] @@ -60,7 +60,7 @@ PUT /_cluster/settings // TEST[setup:host] // TEST[s/127.0.0.1:\{remote-interface-default-port\}/\${transport_host}/] <1> The cluster alias of this remote cluster is `cluster_one`. -<2> Specifies the hostname and {remote-interface} port of a seed node in the +<2> Specifies the hostname and {remote-interface} port of a seed node in the remote cluster. You can use the <> to verify that @@ -86,7 +86,7 @@ cluster with the cluster alias `cluster_one`: "num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, <2> + "skip_unavailable" : true, <2> ifeval::["{trust-mechanism}"=="api-key"] "cluster_credentials": "::es_redacted::", <3> endif::[] @@ -103,7 +103,7 @@ connected to. <2> Indicates whether to skip the remote cluster if searched through {ccs} but no nodes are available. 
ifeval::["{trust-mechanism}"=="api-key"] -<3> If present, indicates the remote cluster has connected using API key +<3> If present, indicates the remote cluster has connected using API key authentication. endif::[] @@ -187,7 +187,7 @@ PUT _cluster/settings You can delete a remote cluster from the cluster settings by passing `null` values for each remote cluster setting. The following request removes -`cluster_two` from the cluster settings, leaving `cluster_one` and +`cluster_two` from the cluster settings, leaving `cluster_one` and `cluster_three` intact: [source,console] @@ -212,15 +212,15 @@ PUT _cluster/settings ===== Statically configure remote clusters If you specify settings in `elasticsearch.yml`, only the nodes with -those settings can connect to the remote cluster and serve remote cluster +those settings can connect to the remote cluster and serve remote cluster requests. -NOTE: Remote cluster settings that are specified using the +NOTE: Remote cluster settings that are specified using the <> take precedence over settings that you specify in `elasticsearch.yml` for individual nodes. -In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are -arbitrary cluster aliases representing the connection to each cluster. These +In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are +arbitrary cluster aliases representing the connection to each cluster. These names are subsequently used to distinguish between local and remote indices. [source,yaml,subs=attributes+] diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index bba8c7ffb3491..ec61c4c59fc74 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -28,9 +28,20 @@ mode are described separately. 
Per cluster boolean setting that allows to skip specific clusters when no nodes belonging to them are available and they are the target of a remote - cluster request. Default is `false`, meaning that all clusters are mandatory - by default, but they can selectively be made optional by setting this setting - to `true`. + cluster request. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. Once you upgrade the local ("querying") +cluster search coordinator node (the node you send CCS requests to) to 8.15 or later, +any remote clusters that do not have an explicit setting for `skip_unavailable` will +immediately change over to using the new default of true. This is true regardless of +whether you have upgraded the remote clusters to 8.15, as the `skip_unavailable` +search behavior is entirely determined by the setting on the local cluster where +you configure the remotes. 
+ `cluster.remote..transport.ping_schedule`:: diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 48c65ed0abc7b..3fed14231808c 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -194,7 +194,7 @@ The API returns the following result: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { @@ -1051,7 +1051,7 @@ And here is the fetch profile: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { diff --git a/docs/reference/search/search-your-data/collapse-search-results.asciidoc b/docs/reference/search/search-your-data/collapse-search-results.asciidoc index ffb6238c89e10..f88fa0d4aca15 100644 --- a/docs/reference/search/search-your-data/collapse-search-results.asciidoc +++ b/docs/reference/search/search-your-data/collapse-search-results.asciidoc @@ -47,7 +47,7 @@ NOTE: Collapsing is applied to the top hits only and does not affect aggregation [[expand-collapse-results]] ==== Expand collapse results -It is also possible to expand each collapsed top hits with the `inner_hits` option. +It is also possible to expand each collapsed top hits with the <> option. [source,console] ---- @@ -86,7 +86,7 @@ GET /my-index-000001/_search See <> for the complete list of supported options and the format of the response. -It is also possible to request multiple `inner_hits` for each collapsed hit. This can be useful when you want to get +It is also possible to request multiple <> for each collapsed hit. This can be useful when you want to get multiple representations of the collapsed hits. [source,console] @@ -145,8 +145,7 @@ The `max_concurrent_group_searches` request parameter can be used to control the maximum number of concurrent searches allowed in this phase. 
The default is based on the number of data nodes and the default search thread pool size. -WARNING: `collapse` cannot be used in conjunction with <> or -<>. +WARNING: `collapse` cannot be used in conjunction with <>. [discrete] [[collapsing-with-search-after]] @@ -175,6 +174,65 @@ GET /my-index-000001/_search ---- // TEST[setup:my_index] +[discrete] +[[rescore-collapse-results]] +==== Rescore collapse results + +You can use field collapsing alongside the <> search parameter. +Rescorers run on every shard for the top-ranked document per collapsed field. +To maintain a reliable order, it is recommended to cluster documents sharing the same collapse +field value on one shard. +This is achieved by assigning the collapse field value as the <> +during indexing: + +[source,console] +---- +POST /my-index-000001/_doc?routing=xyz <1> +{ + "@timestamp": "2099-11-15T13:12:00", + "message": "You know for search!", + "user.id": "xyz" +} +---- +// TEST[setup:my_index] +<1> Assign routing with the collapse field value (`user.id`). + +By doing this, you guarantee that only one top document per +collapse key gets rescored globally. + +The following request utilizes field collapsing on the `user.id` +field and then rescores the top groups with a <>: + +[source,console] +---- +GET /my-index-000001/_search +{ + "query": { + "match": { + "message": "you know for search" + } + }, + "collapse": { + "field": "user.id" + }, + "rescore" : { + "window_size" : 50, + "query" : { + "rescore_query" : { + "match_phrase": { + "message": "you know for search" + } + }, + "query_weight" : 0.3, + "rescore_query_weight" : 1.4 + } + } +} +---- +// TEST[setup:my_index] + +WARNING: Rescorers are not applied to <>. 
+ [discrete] [[second-level-of-collapsing]] ==== Second level of collapsing diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc index 1d040a116ad9a..2e9693eff0451 100644 --- a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc @@ -64,12 +64,6 @@ When exposing pagination to users, `window_size` should remain constant as each Depending on how your model is trained, it’s possible that the model will return negative scores for documents. While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer. -[discrete] -[[learning-to-rank-rescorer-limitations-field-collapsing]] -====== Compatibility with field collapsing - -LTR rescorers are not compatible with the <>. - [discrete] [[learning-to-rank-rescorer-limitations-term-statistics]] ====== Term statistics as features diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 2573722b6d2e7..5f9e92c575793 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -1178,7 +1178,13 @@ gathered from all 3 clusters and the total shard count on each cluster is listed By default, a {ccs} fails if a remote cluster in the request is unavailable or returns an error where the search on all shards failed. Use the `skip_unavailable` cluster setting to mark a specific remote cluster as -optional for {ccs}. +either optional or required for {ccs}. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. 
Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. If `skip_unavailable` is `true`, a {ccs}: @@ -1196,25 +1202,33 @@ parameter and the related `search.default_allow_partial_results` cluster setting when searching the remote cluster. This means searches on the remote cluster may return partial results. -The following <> -API request changes `skip_unavailable` setting to `true` for `cluster_two`. - -[source,console] --------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.remote.cluster_two.skip_unavailable": true - } -} --------------------------------- -// TEST[continued] - -If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't -include matching documents from that cluster in the final results. If at -least one shard provides results, those results will be used and the -search will return partial data. (If doing {ccs} using async search, -the `is_partial` field will be set to `true` to indicate partial results.) +You can modify the `skip_unavailable` setting by editing the `cluster.remote.` +settings in the elasticsearch.yml config file. For example: + +``` +cluster: + remote: + cluster_one: + seeds: 35.238.149.1:9300 + skip_unavailable: false + cluster_two: + seeds: 35.238.149.2:9300 + skip_unavailable: true +``` + +Or you can set the cluster.remote settings via the +<> API as shown +<>. + +When a remote cluster configured with `skip_unavailable: true` (such as +`cluster_two` above) is disconnected or unavailable during a {ccs}, {es} won't +include matching documents from that cluster in the final results and the +search will be considered successful (HTTP status 200 OK). + +If at least one shard from a cluster provides search results, those results will +be used and the search will return partial data. 
This is true regardless of +the `skip_unavailable` setting of the remote cluster. (If doing {ccs} using async +search, the `is_partial` field will be set to `true` to indicate partial results.) [discrete] [[ccs-network-delays]] diff --git a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java index 30883ef3af731..df7c47943289d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java +++ b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java @@ -17,9 +17,11 @@ public class TimeValue implements Comparable { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); - public static final TimeValue MINUS_ONE = timeValueMillis(-1); - public static final TimeValue ZERO = timeValueMillis(0); - public static final TimeValue MAX_VALUE = TimeValue.timeValueNanos(Long.MAX_VALUE); + public static final TimeValue MINUS_ONE = new TimeValue(-1, TimeUnit.MILLISECONDS); + public static final TimeValue ZERO = new TimeValue(0, TimeUnit.MILLISECONDS); + public static final TimeValue MAX_VALUE = new TimeValue(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + public static final TimeValue THIRTY_SECONDS = new TimeValue(30, TimeUnit.SECONDS); + public static final TimeValue ONE_MINUTE = new TimeValue(1, TimeUnit.MINUTES); private static final long C0 = 1L; private static final long C1 = C0 * 1000L; @@ -49,14 +51,28 @@ public static TimeValue timeValueNanos(long nanos) { } public static TimeValue timeValueMillis(long millis) { + if (millis == 0) { + return ZERO; + } + if (millis == -1) { + return MINUS_ONE; + } return new TimeValue(millis, TimeUnit.MILLISECONDS); } public static TimeValue timeValueSeconds(long seconds) { + if (seconds == 30) { + // common value, no need to allocate each time + return THIRTY_SECONDS; + } return new TimeValue(seconds, TimeUnit.SECONDS); } public static TimeValue 
timeValueMinutes(long minutes) { + if (minutes == 1) { + // common value, no need to allocate each time + return ONE_MINUTE; + } return new TimeValue(minutes, TimeUnit.MINUTES); } @@ -355,18 +371,18 @@ public static TimeValue parseTimeValue(@Nullable String sValue, TimeValue defaul } final String normalized = sValue.toLowerCase(Locale.ROOT).trim(); if (normalized.endsWith("nanos")) { - return new TimeValue(parse(sValue, normalized, "nanos", settingName), TimeUnit.NANOSECONDS); + return TimeValue.timeValueNanos(parse(sValue, normalized, "nanos", settingName)); } else if (normalized.endsWith("micros")) { return new TimeValue(parse(sValue, normalized, "micros", settingName), TimeUnit.MICROSECONDS); } else if (normalized.endsWith("ms")) { - return new TimeValue(parse(sValue, normalized, "ms", settingName), TimeUnit.MILLISECONDS); + return TimeValue.timeValueMillis(parse(sValue, normalized, "ms", settingName)); } else if (normalized.endsWith("s")) { - return new TimeValue(parse(sValue, normalized, "s", settingName), TimeUnit.SECONDS); + return TimeValue.timeValueSeconds(parse(sValue, normalized, "s", settingName)); } else if (sValue.endsWith("m")) { // parsing minutes should be case-sensitive as 'M' means "months", not "minutes"; this is the only special case. 
- return new TimeValue(parse(sValue, normalized, "m", settingName), TimeUnit.MINUTES); + return TimeValue.timeValueMinutes(parse(sValue, normalized, "m", settingName)); } else if (normalized.endsWith("h")) { - return new TimeValue(parse(sValue, normalized, "h", settingName), TimeUnit.HOURS); + return TimeValue.timeValueHours(parse(sValue, normalized, "h", settingName)); } else if (normalized.endsWith("d")) { return new TimeValue(parse(sValue, normalized, "d", settingName), TimeUnit.DAYS); } else if (normalized.matches("-0*1")) { diff --git a/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index a021299aaa06d..b6481db9b9951 100644 --- a/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -242,4 +242,16 @@ private TimeUnit randomTimeUnitObject() { TimeUnit.DAYS ); } + + public void testInternedValues() { + assertSame(TimeValue.timeValueMillis(-1), TimeValue.MINUS_ONE); + assertSame(TimeValue.timeValueMillis(0), TimeValue.ZERO); + assertSame(TimeValue.timeValueSeconds(30), TimeValue.THIRTY_SECONDS); + assertSame(TimeValue.timeValueMinutes(1), TimeValue.ONE_MINUTE); + + assertSame(TimeValue.parseTimeValue("-1", getTestName()), TimeValue.MINUS_ONE); + assertSame(TimeValue.parseTimeValue("0", getTestName()), TimeValue.ZERO); + assertSame(TimeValue.parseTimeValue("30s", getTestName()), TimeValue.THIRTY_SECONDS); + assertSame(TimeValue.parseTimeValue("1m", getTestName()), TimeValue.ONE_MINUTE); + } } diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle index 73c2c6fe14ba6..e072359620748 100644 --- a/libs/native/libraries/build.gradle +++ b/libs/native/libraries/build.gradle @@ -18,7 +18,7 @@ configurations { } var zstdVersion = "1.5.5" -var vecVersion = "1.0.1" +var vecVersion = "1.0.3" repositories { exclusiveContent { diff --git 
a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index 993c9d2a874b6..56017d3a8a20a 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -27,7 +27,7 @@ abstract class PosixNativeAccess extends AbstractNativeAccess { static VectorSimilarityFunctions vectorSimilarityFunctionsOrNull(NativeLibraryProvider libraryProvider) { if (isNativeVectorLibSupported()) { - var lib = new VectorSimilarityFunctions(libraryProvider.getLibrary(VectorLibrary.class)); + var lib = libraryProvider.getLibrary(VectorLibrary.class).getVectorSimilarityFunctions(); logger.info("Using native vector library; to disable start with -D" + ENABLE_JDK_VECTOR_LIBRARY + "=false"); return lib; } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java index 7cb852ccf7876..6b8f6048fe058 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java @@ -8,25 +8,16 @@ package org.elasticsearch.nativeaccess; -import org.elasticsearch.nativeaccess.lib.VectorLibrary; - import java.lang.invoke.MethodHandle; /** - * Utility class providing vector similarity functions. + * Utility interface providing vector similarity functions. * *

MethodHandles are returned to avoid a static reference to MemorySegment, * which is not in the currently lowest compile version, JDK 17. Code consuming * the method handles will, by definition, require access to MemorySegment. */ -public final class VectorSimilarityFunctions implements VectorLibrary { - - private final VectorLibrary vectorLibrary; - - VectorSimilarityFunctions(VectorLibrary vectorLibrary) { - this.vectorLibrary = vectorLibrary; - } - +public interface VectorSimilarityFunctions { /** * Produces a method handle returning the dot product of byte (signed int8) vectors. * @@ -34,9 +25,7 @@ public final class VectorSimilarityFunctions implements VectorLibrary { * its first and second arguments will be {@code MemorySegment}, whose contents is the * vector data bytes. The third argument is the length of the vector data. */ - public MethodHandle dotProductHandle() { - return vectorLibrary.dotProductHandle(); - } + MethodHandle dotProductHandle(); /** * Produces a method handle returning the square distance of byte (signed int8) vectors. @@ -45,7 +34,5 @@ public MethodHandle dotProductHandle() { * its first and second arguments will be {@code MemorySegment}, whose contents is the * vector data bytes. The third argument is the length of the vector data. 
*/ - public MethodHandle squareDistanceHandle() { - return vectorLibrary.squareDistanceHandle(); - } + MethodHandle squareDistanceHandle(); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java index a11533c29bebc..86d1a82b2bdc9 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java @@ -8,7 +8,8 @@ package org.elasticsearch.nativeaccess.lib; -import java.lang.invoke.MethodHandle; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; /** * A VectorLibrary is just an adaptation of the factory for a NativeLibrary. @@ -16,8 +17,6 @@ * for native implementations. */ public non-sealed interface VectorLibrary extends NativeLibrary { - - MethodHandle dotProductHandle(); - - MethodHandle squareDistanceHandle(); + @Nullable + VectorSimilarityFunctions getVectorSimilarityFunctions(); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index d4ab57396e290..b988c9730fd1b 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -8,6 +8,7 @@ package org.elasticsearch.nativeaccess.jdk; +import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import java.lang.foreign.FunctionDescriptor; @@ -23,142 +24,166 @@ public final class JdkVectorLibrary implements VectorLibrary { + static final VectorSimilarityFunctions INSTANCE; + static { System.loadLibrary("vec"); + final MethodHandle vecCaps$mh = downcallHandle("vec_caps", FunctionDescriptor.of(JAVA_INT)); + + try 
{ + int caps = (int) vecCaps$mh.invokeExact(); + if (caps != 0) { + INSTANCE = new JdkVectorSimilarityFunctions(); + } else { + INSTANCE = null; + } + } catch (Throwable t) { + throw new AssertionError(t); + } } public JdkVectorLibrary() {} - static final MethodHandle dot8stride$mh = downcallHandle("dot8s_stride", FunctionDescriptor.of(JAVA_INT)); - static final MethodHandle sqr8stride$mh = downcallHandle("sqr8s_stride", FunctionDescriptor.of(JAVA_INT)); + @Override + public VectorSimilarityFunctions getVectorSimilarityFunctions() { + return INSTANCE; + } - static final MethodHandle dot8s$mh = downcallHandle("dot8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); - static final MethodHandle sqr8s$mh = downcallHandle("sqr8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); + private static final class JdkVectorSimilarityFunctions implements VectorSimilarityFunctions { - // Stride of the native implementation - consumes this number of bytes per loop invocation. 
- // There must be at least this number of bytes/elements available when going native - static final int DOT_STRIDE = 32; - static final int SQR_STRIDE = 16; + static final MethodHandle dot8stride$mh = downcallHandle("dot8s_stride", FunctionDescriptor.of(JAVA_INT)); + static final MethodHandle sqr8stride$mh = downcallHandle("sqr8s_stride", FunctionDescriptor.of(JAVA_INT)); - static { - assert DOT_STRIDE > 0 && (DOT_STRIDE & (DOT_STRIDE - 1)) == 0 : "Not a power of two"; - assert dot8Stride() == DOT_STRIDE : dot8Stride() + " != " + DOT_STRIDE; - assert SQR_STRIDE > 0 && (SQR_STRIDE & (SQR_STRIDE - 1)) == 0 : "Not a power of two"; - assert sqr8Stride() == SQR_STRIDE : sqr8Stride() + " != " + SQR_STRIDE; - } + static final MethodHandle dot8s$mh = downcallHandle("dot8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); + static final MethodHandle sqr8s$mh = downcallHandle("sqr8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); - /** - * Computes the dot product of given byte vectors. - * @param a address of the first vector - * @param b address of the second vector - * @param length the vector dimensions - */ - static int dotProduct(MemorySegment a, MemorySegment b, int length) { - assert length >= 0; - if (a.byteSize() != b.byteSize()) { - throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); - } - if (length > a.byteSize()) { - throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); - } - int i = 0; - int res = 0; - if (length >= DOT_STRIDE) { - i += length & ~(DOT_STRIDE - 1); - res = dot8s(a, b, i); - } + // Stride of the native implementation - consumes this number of bytes per loop invocation. 
+ // There must be at least this number of bytes/elements available when going native + static final int DOT_STRIDE = 32; + static final int SQR_STRIDE = 16; - // tail - for (; i < length; i++) { - res += a.get(JAVA_BYTE, i) * b.get(JAVA_BYTE, i); + static { + assert DOT_STRIDE > 0 && (DOT_STRIDE & (DOT_STRIDE - 1)) == 0 : "Not a power of two"; + assert dot8Stride() == DOT_STRIDE : dot8Stride() + " != " + DOT_STRIDE; + assert SQR_STRIDE > 0 && (SQR_STRIDE & (SQR_STRIDE - 1)) == 0 : "Not a power of two"; + assert sqr8Stride() == SQR_STRIDE : sqr8Stride() + " != " + SQR_STRIDE; } - assert i == length; - return res; - } - /** - * Computes the square distance of given byte vectors. - * @param a address of the first vector - * @param b address of the second vector - * @param length the vector dimensions - */ - static int squareDistance(MemorySegment a, MemorySegment b, int length) { - assert length >= 0; - if (a.byteSize() != b.byteSize()) { - throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); - } - if (length > a.byteSize()) { - throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); - } - int i = 0; - int res = 0; - if (length >= SQR_STRIDE) { - i += length & ~(SQR_STRIDE - 1); - res = sqr8s(a, b, i); + /** + * Computes the dot product of given byte vectors. 
+ * + * @param a address of the first vector + * @param b address of the second vector + * @param length the vector dimensions + */ + static int dotProduct(MemorySegment a, MemorySegment b, int length) { + assert length >= 0; + if (a.byteSize() != b.byteSize()) { + throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); + } + if (length > a.byteSize()) { + throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); + } + int i = 0; + int res = 0; + if (length >= DOT_STRIDE) { + i += length & ~(DOT_STRIDE - 1); + res = dot8s(a, b, i); + } + + // tail + for (; i < length; i++) { + res += a.get(JAVA_BYTE, i) * b.get(JAVA_BYTE, i); + } + assert i == length; + return res; } - // tail - for (; i < length; i++) { - int dist = a.get(JAVA_BYTE, i) - b.get(JAVA_BYTE, i); - res += dist * dist; + /** + * Computes the square distance of given byte vectors. + * + * @param a address of the first vector + * @param b address of the second vector + * @param length the vector dimensions + */ + static int squareDistance(MemorySegment a, MemorySegment b, int length) { + assert length >= 0; + if (a.byteSize() != b.byteSize()) { + throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); + } + if (length > a.byteSize()) { + throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); + } + int i = 0; + int res = 0; + if (length >= SQR_STRIDE) { + i += length & ~(SQR_STRIDE - 1); + res = sqr8s(a, b, i); + } + + // tail + for (; i < length; i++) { + int dist = a.get(JAVA_BYTE, i) - b.get(JAVA_BYTE, i); + res += dist * dist; + } + assert i == length; + return res; } - assert i == length; - return res; - } - private static int dot8Stride() { - try { - return (int) dot8stride$mh.invokeExact(); - } catch (Throwable t) { - throw new AssertionError(t); + private static int dot8Stride() { + try { + return (int) 
dot8stride$mh.invokeExact(); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int sqr8Stride() { - try { - return (int) sqr8stride$mh.invokeExact(); - } catch (Throwable t) { - throw new AssertionError(t); + private static int sqr8Stride() { + try { + return (int) sqr8stride$mh.invokeExact(); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int dot8s(MemorySegment a, MemorySegment b, int length) { - try { - return (int) dot8s$mh.invokeExact(a, b, length); - } catch (Throwable t) { - throw new AssertionError(t); + private static int dot8s(MemorySegment a, MemorySegment b, int length) { + try { + return (int) dot8s$mh.invokeExact(a, b, length); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int sqr8s(MemorySegment a, MemorySegment b, int length) { - try { - return (int) sqr8s$mh.invokeExact(a, b, length); - } catch (Throwable t) { - throw new AssertionError(t); + private static int sqr8s(MemorySegment a, MemorySegment b, int length) { + try { + return (int) sqr8s$mh.invokeExact(a, b, length); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - - static final MethodHandle DOT_HANDLE; - static final MethodHandle SQR_HANDLE; - static { - try { - var lookup = MethodHandles.lookup(); - var mt = MethodType.methodType(int.class, MemorySegment.class, MemorySegment.class, int.class); - DOT_HANDLE = lookup.findStatic(JdkVectorLibrary.class, "dotProduct", mt); - SQR_HANDLE = lookup.findStatic(JdkVectorLibrary.class, "squareDistance", mt); - } catch (NoSuchMethodException | IllegalAccessException e) { - throw new RuntimeException(e); + static final MethodHandle DOT_HANDLE; + static final MethodHandle SQR_HANDLE; + + static { + try { + var lookup = MethodHandles.lookup(); + var mt = MethodType.methodType(int.class, MemorySegment.class, MemorySegment.class, int.class); + DOT_HANDLE = lookup.findStatic(JdkVectorSimilarityFunctions.class, "dotProduct", mt); + 
SQR_HANDLE = lookup.findStatic(JdkVectorSimilarityFunctions.class, "squareDistance", mt); + } catch (NoSuchMethodException | IllegalAccessException e) { + throw new RuntimeException(e); + } } - } - @Override - public MethodHandle dotProductHandle() { - return DOT_HANDLE; - } + @Override + public MethodHandle dotProductHandle() { + return DOT_HANDLE; + } - @Override - public MethodHandle squareDistanceHandle() { - return SQR_HANDLE; + @Override + public MethodHandle squareDistanceHandle() { + return SQR_HANDLE; + } } } diff --git a/libs/vec/native/publish_vec_binaries.sh b/libs/vec/native/publish_vec_binaries.sh index 6cdea109c2eb7..7c460eb0321c9 100755 --- a/libs/vec/native/publish_vec_binaries.sh +++ b/libs/vec/native/publish_vec_binaries.sh @@ -19,7 +19,7 @@ if [ -z "$ARTIFACTORY_API_KEY" ]; then exit 1; fi -VERSION="1.0.1" +VERSION="1.0.3" ARTIFACTORY_REPOSITORY="${ARTIFACTORY_REPOSITORY:-https://artifactory.elastic.dev/artifactory/elasticsearch-native/}" TEMP=$(mktemp -d) diff --git a/libs/vec/native/src/vec/c/vec.c b/libs/vec/native/src/vec/c/vec.c index 008129b665d01..46cc6722d01d0 100644 --- a/libs/vec/native/src/vec/c/vec.c +++ b/libs/vec/native/src/vec/c/vec.c @@ -18,6 +18,34 @@ #define SQR8S_STRIDE_BYTES_LEN 16 #endif +#ifdef __linux__ + #include + #include + #ifndef HWCAP_NEON + #define HWCAP_NEON 0x1000 + #endif +#endif + +#ifdef __APPLE__ +#include +#endif + +EXPORT int vec_caps() { +#ifdef __APPLE__ + #ifdef TARGET_OS_OSX + // All M series Apple silicon support Neon instructions + return 1; + #else + #error "Unsupported Apple platform" + #endif +#elif __linux__ + int hwcap = getauxval(AT_HWCAP); + return (hwcap & HWCAP_NEON) != 0; +#else + #error "Unsupported aarch64 platform" +#endif +} + EXPORT int dot8s_stride() { return DOT8_STRIDE_BYTES_LEN; } diff --git a/libs/vec/native/src/vec/headers/vec.h b/libs/vec/native/src/vec/headers/vec.h index a717ad2712e1c..380111107f383 100644 --- a/libs/vec/native/src/vec/headers/vec.h +++ 
b/libs/vec/native/src/vec/headers/vec.h @@ -6,7 +6,15 @@ * Side Public License, v 1. */ +#ifdef _MSC_VER +#define EXPORT extern "C" __declspec(dllexport) +#elif defined(__GNUC__) && !defined(__clang__) #define EXPORT __attribute__((externally_visible,visibility("default"))) +#elif __clang__ +#define EXPORT __attribute__((visibility("default"))) +#endif + +EXPORT int vec_caps(); EXPORT int dot8s_stride(); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 7af912fe3efa2..cfaf4b77a07be 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -230,7 +230,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { bucketInnerInterval = 1; // Calculated on merge. 
} // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingLong(b -> b.key)); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index 255a78408eb6d..53142f6cdf601 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -122,18 +122,16 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt SortedNumericDocValues docValues = numericVS.longValues(aggCtx.getLeafReaderContext()); dimensionConsumers.put(entry.getKey(), (docId, tsidBuilder) -> { if (docValues.advanceExact(docId)) { - for (int i = 0; i < docValues.docValueCount(); i++) { - tsidBuilder.addLong(fieldName, docValues.nextValue()); - } + assert docValues.docValueCount() == 1 : "Dimension field cannot be a multi-valued field"; + tsidBuilder.addLong(fieldName, docValues.nextValue()); } }); } else { SortedBinaryDocValues docValues = entry.getValue().bytesValues(aggCtx.getLeafReaderContext()); dimensionConsumers.put(entry.getKey(), (docId, tsidBuilder) -> { if (docValues.advanceExact(docId)) { - for (int i = 0; i < docValues.docValueCount(); i++) { - tsidBuilder.addString(fieldName, docValues.nextValue()); - } + assert docValues.docValueCount() == 1 : "Dimension field cannot be a multi-valued field"; + tsidBuilder.addString(fieldName, 
docValues.nextValue()); } }); } diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml new file mode 100644 index 0000000000000..fd15d24a5f3ca --- /dev/null +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml @@ -0,0 +1,302 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + city: + type: keyword + ignore_above: 10 + email: + type: keyword + ignore_above: 20 + date_of_birth: + type: date + format: "dd-MM-yyyy" + ignore_malformed: true + newsletter: + type: boolean + ignore_malformed: true + ip_address: + type: ip + ignore_malformed: true + products: + type: keyword + ignore_above: 12 + total_price: + type: double + ignore_malformed: true + location: + type: geo_point + ignore_malformed: true + order_datetime: + type: date + format: "yyyy-MM-dd HH:mm:ss" + ignore_malformed: true + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { "_id": "001" } } + - { "city": "Milano", email: "alice@gmail.com", date_of_birth: "12-03-1990", newsletter: true, ip_address: "130.34.45.202", products: ["it-002-4567", "it-001-6679"], total_price: "57.99", location: [45.46, 9.16], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "002" } } + - { "city": "Roma", email: "bob@gmail.com", date_of_birth: "15-05-1991", newsletter: false, ip_address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", products: [ "it-002-112467", "it-002-5579" ], total_price: "10.99", location: [ -44.78, 19.20 ], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "003" } } + - { "city": "Venezia", email: "alice@gmail.com", date_of_birth: "01-09-1994", newsletter: false, ip_address: "fe80::1", products: [ "it-002", "it-003-17171717" ], total_price: "-12.99", location: [ 182.22, "20.12" ], order_datetime: 
"2021-05-02" } + - { "index": { "_id": "004" } } + - { "city": "Cortina d'Ampezzo", email: "a-very-long-email-address-that-should-be-ignored@gmail.com", date_of_birth: "05-06-1989", newsletter: t, ip_address: "::1", products: [ "it101020203030", "it" ], total_price: "57", location: [ 0, 9.16 ], order_datetime: "2021-05-01-20:01:37" } + - { "index": { "_id": "005" } } + - { "city": "Cortina d'Ampezzo", email: "dave@gmail.com", date_of_birth: "12-03-1990 12:30:45", newsletter: t, ip_address: "130.999.36.201", products: [ "it-002-2213", "it-001-7709" ], total_price: "twentytree/12", location: [ "45.33, 8.20" ], order_datetime: "20210501 20:01:37" } + - { "index": { "_id": "006" } } + - { "city": "Milano", email: "eric@gmail.com", date_of_birth: "19-12-90", newsletter: f, ip_address: "130.34.45", products: [ "it-002-555", "it-001-5589990000" ], total_price: "", location: [ "45.99", "9.16" ], order_datetime: "2021-05-01 20:01:37.123" } + - { "index": { "_id": "007" } } + - { "city": "Venezia", email: "luke-skywalker@gmail.com", date_of_birth: "20/03/1992", newsletter: f, ip_address: "130..45.202", products: [ "it-002-1234", "it-001-1213" ], total_price: "57.99.12", location: [ 45, 20 ], order_datetime: "2021-05-03 19:38:22" } + - { "index": { "_id": "008" } } + - { "city": "Firenze", email: "bob@gmail.com", date_of_birth: "02311988", newsletter: "", ip_address: ":::1", products: ["", ""], total_price: "0.0", location: [ 46.22, 11.22 ], order_datetime: "2021-05-03 20:01" } + - { "index": { "_id": "009" } } + - { "city": "Firenze", email: "tom@gmail.com", date_of_birth: "16-11-1990", newsletter: "not_sure", ip_address: "2001:0db8::1234:5678::", products: "it-002-4567", total_price: "0,99", location: [ 18.18, 19.19 ], order_datetime: "2021-05-03 20-01-55" } + - { "index": { "_id": "010" } } + - { "city": "Cortina d'Ampezzo", email: "alice@gmail.com", date_of_birth: "18-12-1992", newsletter: "false", ip_address: ":::1", products: "it-002-1890994567", total_price: "14,27", 
location: [ 45.46-9.16 ], order_datetime: "2021-05-01 20:05:37" } + - { "index": { "_id": "011" } } + - { "city": "Roma", email: "paul@gmail.com", date_of_birth: "17.15.1990", newsletter: "true", ip_address: "", products: [ "it-002-1019", "it-001-5578", "it-009-9901256" ], total_price: "49.99", location: 45.22, order_datetime: "2021-05-01T20:02:00" } + +--- +"terms aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 9 } + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.3.key: "date_of_birth" } + - match: { aggregations.ignored_terms.buckets.3.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.4.key: "newsletter" } + - match: { aggregations.ignored_terms.buckets.4.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.5.key: "total_price" } + - match: { aggregations.ignored_terms.buckets.5.doc_count: 4 } + - match: { aggregations.ignored_terms.buckets.6.key: "city" } + - match: { aggregations.ignored_terms.buckets.6.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.7.key: "location" } + - match: { aggregations.ignored_terms.buckets.7.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.8.key: "email" } + - match: { aggregations.ignored_terms.buckets.8.doc_count: 2 } + +--- +"terms aggregation on _ignored metadata field with top hits": + - skip: + version: " - 
8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + size: 3 + aggs: + top_by_datetime: + top_hits: + sort: + - order_datetime: { order: desc } + size: 1 + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 3 } + + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.0.top_by_datetime.hits.hits.0._ignored: ["date_of_birth", "email", "ip_address", "newsletter", "total_price"]} + + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.top_by_datetime.hits.hits.0._ignored: ["order_datetime", "products"]} + + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.2.top_by_datetime.hits.hits.0._ignored: ["city", "ip_address", "location", "products", "total_price"]} + +--- +"date histogram aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_histo: + date_histogram: + field: order_datetime + calendar_interval: day + aggs: + ignored_terms: + terms: + field: _ignored + size: 2 + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_histo.buckets: 3 } + + - match: { aggregations.order_datetime_histo.buckets.0.key_as_string: "2021-05-01 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.0.doc_count: 3 } + - match: { aggregations.order_datetime_histo.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + + - match: { 
aggregations.order_datetime_histo.buckets.1.key_as_string: "2021-05-02 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.1.doc_count: 0 } + - length: { aggregations.order_datetime_histo.buckets.1.ignored_terms.buckets: 0 } + + - match: { aggregations.order_datetime_histo.buckets.2.key_as_string: "2021-05-03 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.2.doc_count: 1 } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + +--- +"cardinality aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_cardinality: + cardinality: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_cardinality.value: 9 } + +--- +"value count aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_value_count: + value_count: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_value_count.value: 42 } + +--- +"date range aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_range: + date_range: + field: order_datetime + format: "dd-MM-yyyy" + ranges: + - to: "03-05-2021" + - from: "02-05-2021" + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_range.buckets: 2 } + + - match: { aggregations.order_datetime_range.buckets.0.to_as_string: "03-05-2021" } + - match: { 
aggregations.order_datetime_range.buckets.0.doc_count: 3 } + - length: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.1: { key: "city", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.3: { key: "location", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + + - match: { aggregations.order_datetime_range.buckets.1.from_as_string: "02-05-2021" } + - match: { aggregations.order_datetime_range.buckets.1.doc_count: 1 } + - length: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.3: { key: "newsletter", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + +--- +"random sampler aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + sample: + random_sampler: + probability: 1.0 # make sure buckets count is consistent + seed: 43 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - 
length: { aggregations.sample.ignored_terms.buckets: 9 } + - match: { aggregations.sample.ignored_terms.buckets.0: { key: "ip_address", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.1: { key: "order_datetime", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.2: { key: "products", doc_count: 6 } } + - match: { aggregations.sample.ignored_terms.buckets.3: { key: "date_of_birth", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.4: { key: "newsletter", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.5: { key: "total_price", doc_count: 4 } } + - match: { aggregations.sample.ignored_terms.buckets.6: { key: "city", doc_count: 3 } } + - match: { aggregations.sample.ignored_terms.buckets.7: { key: "location", doc_count: 3 } } + - match: { aggregations.sample.ignored_terms.buckets.8: { key: "email", doc_count: 2 } } + +--- +"filter aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + features: close_to + - do: + search: + body: + size: 0 + aggs: + total: + sum: + field: total_price + filter_ignored: + filter: + term: + _ignored: "email" + + - match: { hits.total.value: 11 } + - close_to: { aggregations.total.value: { value: 162.98, error: 0.01 } } + - match: { aggregations.filter_ignored.doc_count: 2 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 5e703349a41ec..c8c3b032200b7 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -662,6 +662,23 @@ - match: { tokens.0.token: baz } - match: { tokens.1.token: baz } +--- 
+"stemmer_override file access": + - do: + catch: bad_request + indices.create: + index: test + body: + settings: + analysis: + filter: + my_stemmer_override: + type: stemmer_override + rules_path: "jvm.options" + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "/Access.denied.trying.to.read.file.rules_path.*/" } + --- "decompounder": - do: diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index eed06c5c69332..2370cca08b23e 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -19,6 +19,7 @@ import java.util.Map; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -550,9 +551,8 @@ public void testNoSubobjects() throws Exception { // "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" assertThat(fields.get("test.start-timestamp"), is(List.of("not a date"))); assertThat(ignored.size(), is(2)); - assertThat(ignored.get(0), is("vulnerability.textual_score")); + assertThat(ignored, containsInAnyOrder("test.start_timestamp", "vulnerability.textual_score")); // the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type - assertThat(ignored.get(1), is("test.start_timestamp")); assertThat(ignoredFieldValues.get("test.start_timestamp").size(), is(1)); assertThat(ignoredFieldValues.get("test.start_timestamp"), is(List.of("not a date"))); assertThat(ignoredFieldValues.get("vulnerability.textual_score").size(), is(1)); diff --git 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java index e734c913fe9e8..a10a955b33975 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDataStreamLifecycleStatsAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request(); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( GetDataStreamLifecycleStatsAction.INSTANCE, request, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index 522ce12d834a8..048ef0bab8e0c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestExplainDataStreamLifecycleAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ExplainDataStreamLifecycleAction.Request explainRequest = new ExplainDataStreamLifecycleAction.Request(indices); explainRequest.includeDefaults(restRequest.paramAsBoolean("include_defaults", false)); explainRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); - explainRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", explainRequest.masterNodeTimeout())); + explainRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( ExplainDataStreamLifecycleAction.INSTANCE, explainRequest, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java index 70228a16d7a01..736aad08d9212 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutDataStreamLifecycleAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli try (XContentParser parser = request.contentParser()) { PutDataStreamLifecycleAction.Request putLifecycleRequest = PutDataStreamLifecycleAction.Request.parseRequest(parser); 
putLifecycleRequest.indices(Strings.splitStringByCommaToArray(request.param("name"))); - putLifecycleRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + putLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putLifecycleRequest.ackTimeout(request.paramAsTime("timeout", putLifecycleRequest.ackTimeout())); putLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(request, putLifecycleRequest.indicesOptions())); return channel -> client.execute( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java index 006422e4c04e7..d4d6af4091691 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestModifyDataStreamsAction extends BaseRestHandler { @@ -43,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli if (modifyDsRequest.getActions() == null || modifyDsRequest.getActions().isEmpty()) { throw new IllegalArgumentException("no data stream actions specified, at least one must be specified"); } - modifyDsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", modifyDsRequest.masterNodeTimeout())); + modifyDsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); modifyDsRequest.ackTimeout(request.paramAsTime("timeout", modifyDsRequest.ackTimeout())); return channel -> client.execute(ModifyDataStreamsAction.INSTANCE, modifyDsRequest, new RestToXContentListener<>(channel)); } diff --git 
a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index eff9a9beb35bc..40d646cc645f5 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -120,7 +120,7 @@ teardown: --- profile fetch: - skip: - version: ' - 8.13.99' + version: ' - 8.14.99' reason: fetch fields and stored_fields using ValueFetcher - do: @@ -140,7 +140,7 @@ profile fetch: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index a4f939fbe3af8..e0396039029c5 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -19,6 +19,7 @@ import java.util.Collection; import java.util.List; +import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -38,6 +39,11 @@ protected Collection remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } + @Override + protected 
Map skipUnavailableForRemoteClusters() { + return Map.of(REMOTE_CLUSTER, false); + } + @Override protected Collection> nodePlugins(String clusterAlias) { return List.of(ReindexPlugin.class); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java index 3a48a6bcce4e0..99017733dd989 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java @@ -281,8 +281,8 @@ public void testIgnoreAbove() throws IOException { fields = doc.rootDoc().getFields("field"); assertThat(fields, empty()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testUpdateIgnoreAbove() throws IOException { diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index cc613671c860c..a8cff14ff6220 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -101,6 +101,7 @@ public class CcsCommonYamlTestSuiteIT extends ESClientYamlSuiteTestCase { .setting("node.roles", "[data,ingest,master,remote_cluster_client]") .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"") .setting("cluster.remote.connections_per_cluster", "1") + 
.setting("cluster.remote.remote_cluster.skip_unavailable", "false") .apply(commonClusterConfig) .build(); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 5a58f3629df14..e3639ffabf664 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -246,6 +246,7 @@ public void initSearchClient() throws IOException { private static void configureRemoteCluster() throws IOException { final Settings.Builder builder = Settings.builder(); + builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".skip_unavailable", "false"); if (randomBoolean()) { builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".mode", "proxy") .put("cluster.remote." + REMOTE_CLUSTER_NAME + ".proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java index bd0418abc27a8..bd26146f92c0d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java @@ -47,22 +47,20 @@ public void testNullCodeSource() throws Exception { Permission all = new AllPermission(); PermissionCollection allCollection = all.newPermissionCollection(); allCollection.add(all); - ESPolicy policy = new ESPolicy(TEST_CODEBASES, allCollection, Collections.emptyMap(), true, List.of()); + ESPolicy policy = new ESPolicy(TEST_CODEBASES, allCollection, Collections.emptyMap(), true, List.of(), List.of()); // restrict ourselves to NoPermission PermissionCollection noPermissions = new Permissions(); 
assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission("foo", "read"))); } /** - * test with null location - *

- * its unclear when/if this happens, see https://bugs.openjdk.java.net/browse/JDK-8129972 + * As of JDK 9, {@link CodeSource#getLocation} is documented to potentially return {@code null} */ @SuppressForbidden(reason = "to create FilePermission object") public void testNullLocation() throws Exception { assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); PermissionCollection noPermissions = new Permissions(); - ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of()); + ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of(), List.of()); assertFalse( policy.implies( new ProtectionDomain(new CodeSource(null, (Certificate[]) null), noPermissions), @@ -74,7 +72,7 @@ public void testNullLocation() throws Exception { public void testListen() { assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); final PermissionCollection noPermissions = new Permissions(); - final ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of()); + final ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of(), List.of()); assertFalse( policy.implies( new ProtectionDomain(ESPolicyUnitTests.class.getProtectionDomain().getCodeSource(), noPermissions), @@ -91,7 +89,8 @@ public void testDataPathPermissionIsChecked() { new Permissions(), Collections.emptyMap(), true, - List.of(new FilePermission("/home/elasticsearch/data/-", "read")) + List.of(new FilePermission("/home/elasticsearch/data/-", "read")), + List.of() ); assertTrue( policy.implies( @@ -100,4 +99,29 @@ public void testDataPathPermissionIsChecked() { ) ); } + + @SuppressForbidden(reason = "to create FilePermission object") + public void testForbiddenFilesAreForbidden() { + assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); + + FilePermission 
configPerm = new FilePermission("/home/elasticsearch/config/-", "read"); + PermissionCollection coll = configPerm.newPermissionCollection(); + coll.add(configPerm); + + ESPolicy policy = new ESPolicy( + TEST_CODEBASES, + coll, + Collections.emptyMap(), + true, + List.of(), + List.of(new FilePermission("/home/elasticsearch/config/forbidden.yml", "read")) + ); + ProtectionDomain pd = new ProtectionDomain( + new CodeSource(randomBoolean() ? null : randomFrom(TEST_CODEBASES.values()), (Certificate[]) null), + new Permissions() + ); + + assertTrue(policy.implies(pd, new FilePermission("/home/elasticsearch/config/config.yml", "read"))); + assertFalse(policy.implies(pd, new FilePermission("/home/elasticsearch/config/forbidden.yml", "read"))); + } } diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 23c46c5804a6e..d0cbc208f4d8e 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -48,6 +48,7 @@ BuildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, ba setting 'cluster.remote.connections_per_cluster', '1' setting 'cluster.remote.my_remote_cluster.seeds', { "\"${remoteCluster.get().getAllTransportPortURI().get(0)}\"" } + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' } tasks.register("${baseName}#remote-cluster", RestIntegTestTask) { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 8bbbc7435ff5a..da1245268a0a2 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -249,7 +249,7 @@ persistent: cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {persistent: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} + - match: 
{persistent.cluster\.remote\.test_remote_cluster\.seeds: $remote_ip} - do: search: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 144990163583b..da4c91869e53d 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -113,33 +113,33 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: true + cluster.remote.remote1.skip_unavailable: false - - is_true: persistent.cluster.remote.remote1.skip_unavailable + - is_false: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_true: remote1.skip_unavailable + - is_false: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: false + cluster.remote.remote1.skip_unavailable: true - - is_false: persistent.cluster.remote.remote1.skip_unavailable + - is_true: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: @@ -152,7 +152,7 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java new file mode 100644 index 0000000000000..874fac615b9b1 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java @@ -0,0 +1,210 
@@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +public class IgnoredMetaFieldRollingUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + private static final String TERMS_AGG_QUERY = Strings.format(""" + { + "aggs": { + "ignored_terms": { + "terms": { + "field": "_ignored" + } + } + } + }"""); + + public IgnoredMetaFieldRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testAggregation() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("index-old-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-old-agg", "foofoo", "1024.12.321.777", "1")), RestStatus.CREATED); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + 
assertTermsAggIgnoredMetadataField("index-old-agg"); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus(client().performRequest(createNewIndex("index-new-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-new-agg", "barbar", "555.222.111.000", "2")), RestStatus.CREATED); + + assertTermsAggIgnoredMetadataField("index-*"); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + assertTermsAggIgnoredMetadataField("index-old-agg"); + } + assertTermsAggIgnoredMetadataField("index-new-agg"); + } + } + + public void testIgnoredMetaFieldGetWithIgnoredQuery() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("old-get-ignored-index")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "foofoo", "192.168.10.1234", "1")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "1")); + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "barbar", "192.168.256.256", "2")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "2")); + // NOTE: here we are reading documents from an index created by an older version of Elasticsearch where the _ignored + // field could be stored depending on the version of Elasticsearch which created the index. 
The mapper for the _ignored field + // will keep the stored field if necessary to avoid mixing documents where the _ignored field is stored and documents where it + // is not, in the same index. + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + + // NOTE: The stored field is dropped only once a new index is created by a new version of Elasticsearch. + final String newVersionIndexName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument(newVersionIndexName, "foobar", "192.168.777", "3")), RestStatus.CREATED); + final Map docFromNewIndex = entityAsMap(getWithIgnored(newVersionIndexName, "3")); + assertThat(((List) docFromNewIndex.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + } + + public void testIgnoredMetaFieldGetWithoutIgnoredQuery() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("old-get-index")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "foofoo", "192.168.169.300", "1")), RestStatus.CREATED); + final Map doc = entityAsMap(get("old-get-index", "1")); + if (getOldClusterIndexVersion().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertNull(doc.get(IgnoredFieldMapper.NAME)); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + final Map doc1 = entityAsMap(get("old-get-index", "1")); + assertNull(doc1.get(IgnoredFieldMapper.NAME)); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "barbar", "192.168.0.1234", "2")), RestStatus.CREATED); + final Map doc2 = entityAsMap(get("old-get-index", "2")); + assertNull(doc2.get(IgnoredFieldMapper.NAME)); + + final String newVersionIndexName = 
randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + // NOTE: new Elasticsearch version does not used stored field for _ignored due to writing an index created by the new version + assertRestStatus( + client().performRequest(indexDocument(newVersionIndexName, "foobar", "263.192.168.12", "3")), + RestStatus.CREATED + ); + final Map docFromNewIndex = entityAsMap(get(newVersionIndexName, "3")); + assertNull(docFromNewIndex.get(IgnoredFieldMapper.NAME)); + } + } + + private static Response getWithIgnored(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId + "?stored_fields=_ignored")); + } + + private static Response get(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId)); + } + + private static Request waitForClusterStatus(final String statusColor, final String timeoutSeconds) { + final Request waitForGreen = new Request("GET", "/_cluster/health"); + waitForGreen.addParameter("wait_for_status", statusColor); + waitForGreen.addParameter("timeout", timeoutSeconds); + waitForGreen.addParameter("level", "shards"); + return waitForGreen; + } + + private static void assertRestStatus(final Response indexDocumentResponse, final RestStatus restStatus) { + assertThat(indexDocumentResponse.getStatusLine().getStatusCode(), Matchers.equalTo(restStatus.getStatus())); + } + + private static Request createNewIndex(final String indexName) throws IOException { + final Request createIndex = new Request("PUT", "/" + indexName); + final XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("mappings") + .startObject("properties") + .startObject("keyword") + .field("type", "keyword") + .field("ignore_above", 3) + .endObject() + .startObject("ip_address") + 
.field("type", "ip") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndex.setJsonEntity(Strings.toString(mappings)); + return createIndex; + } + + private static Request indexDocument(final String indexName, final String keywordValue, final String ipAddressValue, final String docId) + throws IOException { + final Request indexRequest = new Request("POST", "/" + indexName + "/_doc/" + docId); + final XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .field("keyword", keywordValue) + .field("ip_address", ipAddressValue) + .endObject(); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity(Strings.toString(doc)); + return indexRequest; + } + + @SuppressWarnings("unchecked") + private static void assertTermsAggIgnoredMetadataField(final String indexPattern) throws IOException { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Response aggResponse = client().performRequest(aggRequest); + final Map aggResponseEntityAsMap = entityAsMap(aggResponse); + final Map aggregations = (Map) aggResponseEntityAsMap.get("aggregations"); + final Map ignoredTerms = (Map) aggregations.get("ignored_terms"); + final List> buckets = (List>) ignoredTerms.get("buckets"); + assertThat(buckets.stream().map(bucket -> bucket.get("key")).toList(), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + + private static void assertTermsAggIgnoredMetadataFieldException(final String indexPattern, final String exceptionMessage) { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Exception responseException = assertThrows(ResponseException.class, () -> client().performRequest(aggRequest)); + 
assertThat(responseException.getMessage(), Matchers.containsString(exceptionMessage)); + } + +} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java index f7f46671e2354..eaf439f264ad5 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -33,6 +33,7 @@ import static org.elasticsearch.cluster.metadata.IndexGraveyard.SETTING_MAX_TOMBSTONES; import static org.elasticsearch.indices.IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.test.XContentTestUtils.createJsonMapView; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -111,7 +112,7 @@ public void testDanglingIndicesCanBeImported() throws Exception { importRequest.addParameter("accept_data_loss", "true"); // Ensure this parameter is accepted importRequest.addParameter("timeout", "20s"); - importRequest.addParameter("master_timeout", "20s"); + importRequest.addParameter(REST_MASTER_TIMEOUT_PARAM, "20s"); final Response importResponse = restClient.performRequest(importRequest); assertThat(importResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); @@ -147,7 +148,7 @@ public void testDanglingIndicesCanBeDeleted() throws Exception { deleteRequest.addParameter("accept_data_loss", "true"); // Ensure these parameters is accepted deleteRequest.addParameter("timeout", "20s"); - deleteRequest.addParameter("master_timeout", "20s"); + deleteRequest.addParameter(REST_MASTER_TIMEOUT_PARAM, "20s"); final Response deleteResponse = restClient.performRequest(deleteRequest); 
assertThat(deleteResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 146c78e3c8471..089b7470e9a97 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -83,6 +83,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search/370_profile/fetch source", "profile output has changed") task.skipTest("search/370_profile/fetch nested source", "profile output has changed") task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") + task.skipTest("search/110_field_collapsing/field collapsing and rescore", "#107779 Field collapsing is compatible with rescore in 8.15") task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 9376f3598d6f1..7e0ad2bf28969 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -39,9 +39,9 @@ nested is disabled: --- object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: @@ -68,25 +68,33 @@ object with unmapped fields: body: - '{ "create": { } }' - '{ "name": "aaaa", "some_string": "AaAa", "some_int": 1000, "some_double": 123.456789, "some_bool": true, "a.very.deeply.nested.field": "AAAA" }' + - '{ "create": { } }' + - '{ "name": "bbbb", "some_string": "BbBb", "some_int": 2000, "some_double": 
321.987654, "some_bool": false, "a.very.deeply.nested.field": "BBBB" }' - do: search: index: test + sort: name - - match: { hits.total.value: 1 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: aaaa } - match: { hits.hits.0._source.some_string: AaAa } - match: { hits.hits.0._source.some_int: 1000 } - match: { hits.hits.0._source.some_double: 123.456789 } - match: { hits.hits.0._source.a.very.deeply.nested.field: AAAA } - match: { hits.hits.0._source.some_bool: true } + - match: { hits.hits.1._source.name: bbbb } + - match: { hits.hits.1._source.some_string: BbBb } + - match: { hits.hits.1._source.some_int: 2000 } + - match: { hits.hits.1._source.some_double: 321.987654 } + - match: { hits.hits.1._source.a.very.deeply.nested.field: BBBB } --- nested object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: @@ -117,22 +125,28 @@ nested object with unmapped fields: body: - '{ "create": { } }' - '{ "path.to.name": "aaaa", "path.to.surname": "AaAa", "path.some.other.name": "AaAaAa" }' + - '{ "create": { } }' + - '{ "path.to.name": "bbbb", "path.to.surname": "BbBb", "path.some.other.name": "BbBbBb" }' - do: search: index: test + sort: path.to.name - - match: { hits.total.value: 1 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._source.path.to.name: aaaa } - match: { hits.hits.0._source.path.to.surname: AaAa } - match: { hits.hits.0._source.path.some.other.name: AaAaAa } + - match: { hits.hits.1._source.path.to.name: bbbb } + - match: { hits.hits.1._source.path.to.surname: BbBb } + - match: { hits.hits.1._source.path.some.other.name: BbBbBb } --- empty object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: 
indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml index 76207fd76e45b..c10d3c48259f1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -281,24 +281,6 @@ setup: - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [1] } ---- -"field collapsing and rescore": - - - do: - catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ - search: - rest_total_hits_as_int: true - index: test - body: - collapse: { field: numeric_group } - rescore: - window_size: 20 - query: - rescore_query: - match_all: {} - query_weight: 1 - rescore_query_weight: 2 - --- "no hits and inner_hits": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml new file mode 100644 index 0000000000000..5048bc8d4307c --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml @@ -0,0 +1,107 @@ +setup: + - skip: + version: " - 8.14.99" + reason: Collapse with rescore added in 8.15.0 + - do: + indices.create: + index: products + body: + mappings: + properties: + product_id: { type: keyword } + description: { type: text } + popularity: { type: integer } + + - do: + bulk: + index: products + refresh: true + body: + - '{"index": {"_id": "1", "routing": "0"}}' + - '{"product_id": "0", "description": "flat tv 4K HDR", "score": 2, "popularity": 30}' + - '{"index": {"_id": "2", "routing": "10"}}' + - '{"product_id": "10", "description": "LED Smart TV 32", "score": 5, "popularity": 100}' + - '{"index": {"_id": "3", "routing": "10"}}' + - 
'{"product_id": "10", "description": "LED Smart TV 65", "score": 10, "popularity": 50}' + - '{"index": {"_id": "4", "routing": "0"}}' + - '{"product_id": "0", "description": "flat tv", "score": 1, "popularity": 10}' + - '{"index": {"_id": "5", "routing": "129"}}' + - '{"product_id": "129", "description": "just a tv", "score": 100, "popularity": 3}' + +--- +"field collapsing and rescore": + - do: + search: + index: products + body: + query: + bool: + filter: + match: + description: "tv" + should: + script_score: + query: { match_all: { } } + script: + source: "doc['score'].value" + collapse: + field: product_id + rescore: + query: + rescore_query: + script_score: + query: { match_all: { } } + script: + source: "doc['popularity'].value" + query_weight: 0 + rescore_query_weight: 1 + + + - match: {hits.total.value: 5 } + - length: {hits.hits: 3 } + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0._score: 50} + - match: {hits.hits.0.fields.product_id: ["10"]} + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._score: 30 } + - match: { hits.hits.1.fields.product_id: ["0"] } + - match: { hits.hits.2._id: "5" } + - match: { hits.hits.2._score: 3 } + - match: { hits.hits.2.fields.product_id: ["129"] } + +--- +"field collapsing and rescore with window_size": + - do: + search: + index: products + body: + query: + bool: + filter: + match: + description: "tv" + should: + script_score: + query: { match_all: { } } + script: + source: "doc['score'].value" + collapse: + field: product_id + rescore: + window_size: 2 + query: + rescore_query: + script_score: + query: { match_all: { } } + script: + source: "doc['popularity'].value" + query_weight: 0 + rescore_query_weight: 1 + size: 1 + + + - match: {hits.total.value: 5 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0._score: 50} + - match: {hits.hits.0.fields.product_id: ["10"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml index 817c62dbdd12d..7625f19557e9b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -41,7 +41,7 @@ fetch fields: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 2 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } @@ -74,7 +74,7 @@ fetch source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 3 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } @@ -139,7 +139,7 @@ fetch nested source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { 
profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index 235d1592cf7c7..6f6db1d2d3d82 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; @@ -260,7 +261,7 @@ public void testFailingTargetShards() throws Exception { IndexShard indexShard = indicesService.getShardOrNull(shardId); assertNotNull("No shard found for shard " + shardId, indexShard); logger.info("--> failing shard {} on node {}", shardRequest.shardId(), node); - indexShard.close("test", randomBoolean()); + closeShardNoCheck(indexShard, randomBoolean()); failedShards.incrementAndGet(); } else { successfulShards.incrementAndGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index 4bd97f772e4c3..26afe0e52bd02 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.client.internal.Client; +import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; @@ -92,7 +93,7 @@ public void testBasic() { indices.add(randomFrom("*", "local_*", "local_test")); } indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test")); - String pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); try { if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -162,7 +163,7 @@ public void testOpenPITWithIndexFilter() { request.keepAlive(TimeValue.timeValueMinutes(2)); request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-12-15")); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pitId = response.getPointInTimeId(); + BytesReference pitId = response.getPointInTimeId(); if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -252,7 +253,7 @@ public void testFailuresOnOneShardsWithPointInTime() throws ExecutionException, indices.add(randomFrom("*", "local_*", "local_test")); } indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test")); - String pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); try { if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -308,13 +309,13 @@ private static void assertAllSuccessfulShards(SearchResponse.Cluster cluster, in assertFalse(cluster.isTimedOut()); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference 
openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String readerId) { + private void closePointInTime(BytesReference readerId) { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index 0c1930c0cf925..a9a5bb074c9ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -16,6 +16,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; @@ -82,7 +84,7 @@ public void testBasic() { prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); - String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); @@ -128,7 +130,7 @@ public void testMultipleIndices() { prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); - String 
pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); try { int moreDocs = randomIntBetween(10, 50); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { @@ -181,7 +183,7 @@ public void testIndexFilter() { OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-03-01")); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pitId = response.getPointInTimeId(); + BytesReference pitId = response.getPointInTimeId(); try { SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), pitId); String[] actualIndices = searchContextId.getActualIndices(); @@ -210,7 +212,7 @@ public void testRelocation() throws Exception { prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); @@ -262,7 +264,7 @@ public void testPointInTimeNotFound() throws Exception { prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); - String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { @@ -303,7 +305,7 @@ public void testIndexNotFound() { prepareIndex("index-2").setId(id).setSource("value", 
i).get(); } refresh(); - String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse( prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), @@ -333,7 +335,7 @@ public void testIndexNotFound() { public void testAllowNoIndex() { var request = new OpenPointInTimeRequest("my_index").indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .keepAlive(TimeValue.timeValueMinutes(between(1, 10))); - String pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); + BytesReference pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); var closeResp = client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pit)).actionGet(); assertThat(closeResp.status(), equalTo(RestStatus.OK)); } @@ -346,7 +348,7 @@ public void testCanMatch() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); - String pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); try { for (String node : internalCluster().nodesInclude("test")) { for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { @@ -413,7 +415,7 @@ public void testPartialResults() throws Exception { prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new 
PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); @@ -445,7 +447,7 @@ public void testPITTiebreak() throws Exception { } } refresh("index-*"); - String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); try { for (int size = 1; size <= numIndex; size++) { SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC; @@ -476,7 +478,10 @@ public void testPITTiebreak() throws Exception { } public void testCloseInvalidPointInTime() { - expectThrows(Exception.class, client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(""))); + expectThrows( + Exception.class, + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(BytesArray.EMPTY)) + ); List tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks(); assertThat(tasks, empty()); } @@ -585,13 +590,13 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s assertThat(seen.size(), equalTo(expectedNumDocs)); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String readerId) { + private void closePointInTime(BytesReference readerId) { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 
38921840a2c64..3135647adc9ab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -184,6 +184,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder() .setNames(node2) .build() + .masterNodeTimeout(TimeValue.timeValueSeconds(1)) .timeout(TimeValue.timeValueSeconds(1)); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); assertFalse("prevalidation result should return false", resp.getPrevalidation().isSafe()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 895a60133251f..006c9e2394f3c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -525,7 +526,7 @@ public void testSearchRouting() throws Exception { // do nothing } } - String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); + BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { assertResponse(prepareSearch().setPointInTime(new 
PointInTimeBuilder(pitId)).setProfile(true), response -> { var profileResults = response.getProfileResults(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java new file mode 100644 index 0000000000000..cfe5a2b69c6da --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.junit.Before; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.hasSize; + +@SuppressWarnings("resource") +public class IgnoredMetadataFieldIT extends ESSingleNodeTestCase { + + public static final String NUMERIC_FIELD_NAME = "numeric_field"; + public static final String DATE_FIELD_NAME = "date_field"; + public static final String TEST_INDEX = "test-index"; + public static final String CORRECT_FIELD_TYPE_DOC_ID = "1"; + public static final String WRONG_FIELD_TYPE_DOC_ID = "2"; + + @Before + public void createTestIndex() throws Exception { + CreateIndexResponse createIndexResponse = null; + try { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject(NUMERIC_FIELD_NAME) + .field("type", "long") + .field("ignore_malformed", true) + .endObject() + .startObject(DATE_FIELD_NAME) + .field("type", "date") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndexResponse = indicesAdmin().prepareCreate(TEST_INDEX).setMapping(mapping).get(); + assertAcked(createIndexResponse); + indexTestDoc(NUMERIC_FIELD_NAME, CORRECT_FIELD_TYPE_DOC_ID, "42"); + indexTestDoc(NUMERIC_FIELD_NAME, WRONG_FIELD_TYPE_DOC_ID, "forty-two"); + } finally { + if (createIndexResponse != null) { + createIndexResponse.decRef(); + } + } + } + + public void testIgnoredMetadataFieldFetch() { + SearchResponse searchResponse1 = null; + SearchResponse searchResponse2 = null; + try { + searchResponse1 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(CORRECT_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse1, 1); + SearchHit hit = searchResponse1.getHits().getAt(0); + DocumentField numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals(42, (long) numericField.getValue()); + DocumentField ignoredField = 
hit.field(IgnoredFieldMapper.NAME); + assertNull(ignoredField); + + searchResponse2 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(WRONG_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse2, 1); + hit = searchResponse2.getHits().getAt(0); + numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals("forty-two", numericField.getIgnoredValues().get(0)); + ignoredField = hit.field(IgnoredFieldMapper.NAME); + assertNotNull(ignoredField); + assertEquals(NUMERIC_FIELD_NAME, ignoredField.getValue()); + } finally { + if (searchResponse1 != null) { + searchResponse1.decRef(); + } + if (searchResponse2 != null) { + searchResponse2.decRef(); + } + } + } + + public void testIgnoredMetadataFieldAggregation() { + SearchResponse avgSearch = null; + SearchResponse termsSearch = null; + try { + indexTestDoc(NUMERIC_FIELD_NAME, "correct-44", "44"); + avgSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(avg("numeric-field-aggs").field(NUMERIC_FIELD_NAME)) + .get(); + assertTrue(avgSearch.hasAggregations()); + InternalAvg avg = avgSearch.getAggregations().get("numeric-field-aggs"); + assertNotNull(avg); + assertEquals(43.0, avg.getValue(), 0.0); + + indexTestDoc(NUMERIC_FIELD_NAME, "wrong-44", "forty-four"); + indexTestDoc(DATE_FIELD_NAME, "wrong-date", "today"); + termsSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(terms("ignored-field-aggs").field(IgnoredFieldMapper.NAME)) + .get(); + assertTrue(termsSearch.hasAggregations()); + StringTerms terms = termsSearch.getAggregations().get("ignored-field-aggs"); + assertNotNull(terms); + assertThat(terms.getBuckets(), hasSize(2)); + StringTerms.Bucket numericFieldBucket = terms.getBucketByKey(NUMERIC_FIELD_NAME); + assertEquals(NUMERIC_FIELD_NAME, numericFieldBucket.getKeyAsString()); + assertEquals(2, numericFieldBucket.getDocCount()); + StringTerms.Bucket dateFieldBucket = 
terms.getBucketByKey(DATE_FIELD_NAME); + assertEquals(DATE_FIELD_NAME, dateFieldBucket.getKeyAsString()); + assertEquals(1, dateFieldBucket.getDocCount()); + } finally { + if (avgSearch != null) { + avgSearch.decRef(); + } + if (termsSearch != null) { + termsSearch.decRef(); + } + } + } + + private void indexTestDoc(String testField, String docId, String testValue) { + DocWriteResponse docWriteResponse = null; + try { + docWriteResponse = client().prepareIndex(TEST_INDEX) + .setId(docId) + .setSource(testField, testValue) + .setRefreshPolicy(IMMEDIATE) + .get(); + assertEquals(RestStatus.CREATED, docWriteResponse.status()); + } finally { + if (docWriteResponse != null) { + docWriteResponse.decRef(); + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java index b38198a98b5a5..256bdd45afbf7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -101,7 +102,7 @@ public void accept(final long g, final Exception e) { } }, null); - shard.close("closed", randomBoolean()); + closeShardNoCheck(shard, randomBoolean()); assertBusy(() -> assertTrue(invoked.get())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index ec9373120f491..c01d945ca2a1a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -93,6 +93,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; import static org.elasticsearch.index.shard.IndexShardTestCase.recoverFromStore; import static org.elasticsearch.test.LambdaMatchers.falseWith; @@ -545,7 +546,7 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); CheckedFunction wrapper = directoryReader -> directoryReader; - shard.close("simon says", false); + closeShardNoCheck(shard); AtomicReference shardRef = new AtomicReference<>(); List failures = new ArrayList<>(); IndexingOperationListener listener = new IndexingOperationListener() { @@ -583,7 +584,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul try { ExceptionsHelper.rethrowAndSuppress(failures); } finally { - newShard.close("just do it", randomBoolean()); + closeShardNoCheck(newShard, randomBoolean()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 58b63eb77d2bd..a9d19473164bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -181,6 +181,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException ClusterHealthResponse health = clusterAdmin().health( new ClusterHealthRequest("test").waitForGreenStatus() // sometimes due to cluster rebalancing and random settings default timeout is just not enough. + .masterNodeTimeout(TimeValue.timeValueMinutes(5)) .timeout(TimeValue.timeValueMinutes(5)) .waitForNoRelocatingShards(true) ).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java index ce3439a5800a8..6f5ebcf17686d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java @@ -13,6 +13,8 @@ import java.io.IOException; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; + public class RestClusterStateActionIT extends ESIntegTestCase { @Override @@ -22,7 +24,7 @@ protected boolean addMockHttpTransport() { public void testInfiniteTimeOut() throws IOException { final var request = new Request("GET", "/_cluster/state/none"); - request.addParameter("master_timeout", "-1"); + request.addParameter(REST_MASTER_TIMEOUT_PARAM, "-1"); getRestClient().performRequest(request); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java index 8197b5b8bdd48..753a0a62bce5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -136,7 +137,7 @@ public void testResolvedIndices_TransportSearchAction() { assertResolvedIndices(prepareSearch("test*"), Set.of("test*"), Set.of(indices), r -> {}); assertResolvedIndices(prepareSearch("alias"), Set.of("alias"), Set.of(indices), r -> {}); - final String pointInTimeId = openPointInTime(indices, TimeValue.timeValueMinutes(2)); + final BytesReference pointInTimeId = openPointInTime(indices, TimeValue.timeValueMinutes(2)); try { final PointInTimeBuilder pointInTimeBuilder = new PointInTimeBuilder(pointInTimeId); assertResolvedIndices(prepareSearch().setPointInTime(pointInTimeBuilder), Set.of(indices), Set.of(indices), r -> {}); @@ -190,13 +191,13 @@ public void testResolvedIndices_TransportValidateQueryAction() { ); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String pointInTimeId) { + private void closePointInTime(BytesReference pointInTimeId) { ClosePointInTimeResponse response = client().execute( TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId) diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 07d976437c24c..d0ff46238c42a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -100,7 +100,11 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc } ClusterHealthResponse clusterHealthResponse = clusterAdmin() // it's OK to timeout here - .health(new ClusterHealthRequest(new String[] {}).waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))) + .health( + new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + .masterNodeTimeout(TimeValue.timeValueSeconds(5)) + .timeout(TimeValue.timeValueSeconds(5)) + ) .get(); final int numDocs; final boolean expectAllShardsFailed; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 64f04d46a9a90..4446338c4ff2a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -83,6 +83,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.array; @@ -526,7 +527,7 @@ private void moveOrCloseShardsOnNodes(String nodeName) throws Exception { for (IndexService 
indexService : indicesService) { for (IndexShard indexShard : indexService) { if (randomBoolean()) { - indexShard.close("test", randomBoolean()); + closeShardNoCheck(indexShard, randomBoolean()); } else if (randomBoolean()) { final ShardId shardId = indexShard.shardId(); final String[] nodeNames = internalCluster().getNodeNames(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 110ac76849e0b..2b61e6ae5d1ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.sort.SortBuilders; @@ -30,8 +31,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.lucene.search.function.CombineFunction.REPLACE; @@ -845,4 +848,139 @@ public void testRescorePhaseWithInvalidSort() throws Exception { } ); } + + record GroupDoc(String id, String group, float firstPassScore, float secondPassScore, boolean shouldFilter) {} + + public void testRescoreAfterCollapse() throws Exception { + assertAcked(prepareCreate("test").setMapping("group", "type=keyword", "shouldFilter", "type=boolean")); + ensureGreen("test"); 
+ GroupDoc[] groupDocs = new GroupDoc[] { + new GroupDoc("1", "c", 200, 1, false), + new GroupDoc("2", "a", 1, 10, true), + new GroupDoc("3", "b", 2, 30, false), + new GroupDoc("4", "c", 1, 1000, false), + // should be highest on rescore, but filtered out during collapse + new GroupDoc("5", "b", 1, 40, false), + new GroupDoc("6", "a", 2, 20, false) }; + List requests = new ArrayList<>(); + for (var groupDoc : groupDocs) { + requests.add( + client().prepareIndex("test") + .setId(groupDoc.id()) + .setRouting(groupDoc.group()) + .setSource( + "group", + groupDoc.group(), + "firstPassScore", + groupDoc.firstPassScore(), + "secondPassScore", + groupDoc.secondPassScore(), + "shouldFilter", + groupDoc.shouldFilter() + ) + ); + } + indexRandom(true, requests); + + var request = client().prepareSearch("test") + .setQuery(fieldValueScoreQuery("firstPassScore")) + .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore"))) + .setCollapse(new CollapseBuilder("group")); + assertResponse(request, resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getHits().length, equalTo(3)); + + SearchHit hit1 = resp.getHits().getAt(0); + assertThat(hit1.getId(), equalTo("1")); + assertThat(hit1.getScore(), equalTo(201F)); + assertThat(hit1.field("group").getValues().size(), equalTo(1)); + assertThat(hit1.field("group").getValues().get(0), equalTo("c")); + + SearchHit hit2 = resp.getHits().getAt(1); + assertThat(hit2.getId(), equalTo("3")); + assertThat(hit2.getScore(), equalTo(32F)); + assertThat(hit2.field("group").getValues().size(), equalTo(1)); + assertThat(hit2.field("group").getValues().get(0), equalTo("b")); + + SearchHit hit3 = resp.getHits().getAt(2); + assertThat(hit3.getId(), equalTo("6")); + assertThat(hit3.getScore(), equalTo(22F)); + assertThat(hit3.field("group").getValues().size(), equalTo(1)); + assertThat(hit3.field("group").getValues().get(0), equalTo("a")); + }); + } + + public void 
testRescoreAfterCollapseRandom() throws Exception { + assertAcked(prepareCreate("test").setMapping("group", "type=keyword", "shouldFilter", "type=boolean")); + ensureGreen("test"); + int numGroups = randomIntBetween(1, 100); + int numDocs = atLeast(100); + GroupDoc[] groups = new GroupDoc[numGroups]; + int numHits = 0; + List requests = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + int group = randomIntBetween(0, numGroups - 1); + boolean shouldFilter = rarely(); + String id = randomUUID(); + float firstPassScore = randomFloat(); + float secondPassScore = randomFloat(); + float bestScore = groups[group] == null ? -1 : groups[group].firstPassScore; + var groupDoc = new GroupDoc(id, Integer.toString(group), firstPassScore, secondPassScore, shouldFilter); + if (shouldFilter == false) { + if (firstPassScore == bestScore) { + // avoid tiebreaker + continue; + } + + numHits++; + if (firstPassScore > bestScore) { + groups[group] = groupDoc; + } + } + requests.add( + client().prepareIndex("test") + .setId(groupDoc.id()) + .setRouting(groupDoc.group()) + .setSource( + "group", + groupDoc.group(), + "firstPassScore", + groupDoc.firstPassScore(), + "secondPassScore", + groupDoc.secondPassScore(), + "shouldFilter", + groupDoc.shouldFilter() + ) + ); + } + indexRandom(true, requests); + + GroupDoc[] sortedGroups = Arrays.stream(groups) + .filter(g -> g != null) + .sorted(Comparator.comparingDouble(GroupDoc::secondPassScore).reversed()) + .toArray(GroupDoc[]::new); + + var request = client().prepareSearch("test") + .setQuery(fieldValueScoreQuery("firstPassScore")) + .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore")).setQueryWeight(0f).windowSize(numGroups)) + .setCollapse(new CollapseBuilder("group")) + .setSize(Math.min(numGroups, 10)); + long expectedNumHits = numHits; + assertResponse(request, resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(expectedNumHits)); + for (int pos = 0; pos < 
resp.getHits().getHits().length; pos++) { + SearchHit hit = resp.getHits().getAt(pos); + assertThat(hit.getId(), equalTo(sortedGroups[pos].id())); + String group = hit.field("group").getValue(); + assertThat(group, equalTo(sortedGroups[pos].group())); + assertThat(hit.getScore(), equalTo(sortedGroups[pos].secondPassScore)); + } + }); + } + + private QueryBuilder fieldValueScoreQuery(String scoreField) { + return functionScoreQuery(termQuery("shouldFilter", false), ScoreFunctionBuilders.fieldValueFactorFunction(scoreField)).boostMode( + CombineFunction.REPLACE + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index a526e721da1ec..b9c3c27abf2d8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -452,7 +453,7 @@ public void testScrollAndSearchAfterWithBigIndex() { } } // search_after with sort with point in time - String pitID; + BytesReference pitID; { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 018bf1b7332a2..59373380d539c 100644 
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; @@ -205,7 +206,7 @@ public void testPointInTime() throws Exception { // Open point-in-time reader OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)); OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pointInTimeId = response.getPointInTimeId(); + BytesReference pointInTimeId = response.getPointInTimeId(); // Test sort on document IDs assertSearchSlicesWithPointInTime(field, ShardDocSortField.NAME, pointInTimeId, max, numDocs); @@ -217,7 +218,13 @@ public void testPointInTime() throws Exception { } } - private void assertSearchSlicesWithPointInTime(String sliceField, String sortField, String pointInTimeId, int numSlice, int numDocs) { + private void assertSearchSlicesWithPointInTime( + String sliceField, + String sortField, + BytesReference pointInTimeId, + int numSlice, + int numDocs + ) { int totalResults = 0; List keys = new ArrayList<>(); for (int id = 0; id < numSlice; id++) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fc4323e418b72..6a53829099223 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -132,7 +132,7 @@ static TransportVersion def(int id) { public static final 
TransportVersion ML_DIMENSIONS_SET_BY_USER_ADDED = def(8_592_00_0); public static final TransportVersion INDEX_REQUEST_NORMALIZED_BYTES_PARSED = def(8_593_00_0); public static final TransportVersion INGEST_GRAPH_STRUCTURE_EXCEPTION = def(8_594_00_0); - public static final TransportVersion ML_MODEL_IN_SERVICE_SETTINGS = def(8_595_00_0); + public static final TransportVersion V_8_13_0 = def(8_595_00_0); // 8.14.0+ public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0); public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); @@ -184,6 +184,7 @@ static TransportVersion def(int id) { public static final TransportVersion ADD_RESOURCE_ALREADY_UPLOADED_EXCEPTION = def(8_643_00_0); public static final TransportVersion ESQL_MV_ORDERING_SORTED_ASCENDING = def(8_644_00_0); public static final TransportVersion ESQL_PAGE_MAPPING_TO_ITERATOR = def(8_645_00_0); + public static final TransportVersion BINARY_PIT_ID = def(8_646_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -248,7 +249,7 @@ static TransportVersion def(int id) { * Reference to the minimum transport version that can be used with CCS. * This should be the transport version used by the previous minor release. 
*/ - public static final TransportVersion MINIMUM_CCS_VERSION = V_8_12_0; + public static final TransportVersion MINIMUM_CCS_VERSION = V_8_13_0; static final NavigableMap VERSION_IDS = getAllVersionIds(TransportVersions.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 7bf0c976d52a5..a94555f1dfd1c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -24,13 +24,12 @@ import java.io.IOException; import java.util.Map; -import java.util.concurrent.TimeUnit; public class ClusterHealthRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.lenientExpandHidden(); - private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS); + private TimeValue timeout = TimeValue.timeValueSeconds(30); private ClusterHealthStatus waitForStatus; private boolean waitForNoRelocatingShards = false; private boolean waitForNoInitializingShards = false; @@ -121,9 +120,6 @@ public TimeValue timeout() { public ClusterHealthRequest timeout(TimeValue timeout) { this.timeout = timeout; - if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) { - masterNodeTimeout = timeout; - } return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index acffb014715dd..a88fb83b2300d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -94,9 +94,6 @@ public TimeValue timeout() { public PrevalidateNodeRemovalRequest timeout(TimeValue timeout) { this.timeout = timeout; - if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) { - masterNodeTimeout = timeout; - } return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index 5aeef6b19298e..b355d3c50400e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -131,12 +131,12 @@ public boolean equals(Object obj) { && Objects.equals(explain, other.explain) && Objects.equals(ackTimeout(), other.ackTimeout()) && Objects.equals(retryFailed, other.retryFailed) - && Objects.equals(masterNodeTimeout, other.masterNodeTimeout); + && Objects.equals(masterNodeTimeout(), other.masterNodeTimeout()); } @Override public int hashCode() { // Override equals and hashCode for testing - return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout); + return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 2e8a28d412e26..9127092bdb13a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -461,7 +461,7 @@ public boolean equals(Object o) { && Arrays.equals(indices, that.indices) 
&& Objects.equals(indicesOptions, that.indicesOptions) && Arrays.equals(featureStates, that.featureStates) - && Objects.equals(masterNodeTimeout, that.masterNodeTimeout) + && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout()) && Objects.equals(userMetadata, that.userMetadata); } @@ -495,7 +495,7 @@ public String toString() { + ", waitForCompletion=" + waitForCompletion + ", masterNodeTimeout=" - + masterNodeTimeout + + masterNodeTimeout() + ", metadata=" + userMetadata + '}'; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index e9de49dcbf5b4..d29996711d722 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -222,7 +222,7 @@ public String getDescription() { if (indices.length > 0) { stringBuilder.append("indices ").append(Arrays.toString(indices)).append(", "); } - stringBuilder.append("master timeout [").append(masterNodeTimeout).append("]]"); + stringBuilder.append("master timeout [").append(masterNodeTimeout()).append("]]"); return stringBuilder.toString(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 666419edc1bf0..7fa2e11317a43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -254,7 +254,7 @@ public boolean equals(Object o) { return false; } UpdateSettingsRequest that = (UpdateSettingsRequest) o; - return masterNodeTimeout.equals(that.masterNodeTimeout) + return 
masterNodeTimeout().equals(that.masterNodeTimeout()) && ackTimeout().equals(that.ackTimeout()) && Objects.equals(settings, that.settings) && Objects.equals(indicesOptions, that.indicesOptions) @@ -265,7 +265,15 @@ && ackTimeout().equals(that.ackTimeout()) @Override public int hashCode() { - return Objects.hash(masterNodeTimeout, ackTimeout(), settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices)); + return Objects.hash( + masterNodeTimeout(), + ackTimeout(), + settings, + indicesOptions, + preserveExisting, + reopen, + Arrays.hashCode(indices) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3bf9c3715b29a..8ef1df3d29a58 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -196,7 +197,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), componentTemplate.getKey()); builder.field(COMPONENT_TEMPLATE.getPreferredName()); - componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + componentTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + 
globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index f2fcbeff73c37..07ebfe123c98f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -157,10 +158,6 @@ public Map indexTemplates() { return indexTemplates; } - public RolloverConfiguration getRolloverConfiguration() { - return rolloverConfiguration; - } - public DataStreamGlobalRetention getGlobalRetention() { return globalRetention; } @@ -199,7 +196,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), indexTemplate.getKey()); builder.field(INDEX_TEMPLATE.getPreferredName()); - indexTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + indexTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 52d40626f97ed..6985e86fb287a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,22 +70,10 @@ public SimulateIndexTemplateResponse( this.globalRetention = globalRetention; } - public Template getResolvedTemplate() { - return resolvedTemplate; - } - - public Map> getOverlappingTemplates() { - return overlappingTemplates; - } - public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } - public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; - } - public SimulateIndexTemplateResponse(StreamInput in) throws IOException { super(in); resolvedTemplate = in.readOptionalWriteable(Template::new); @@ -132,7 +121,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); if (this.resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); - this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration, globalRetention); + this.resolvedTemplate.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); diff --git 
a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index f2a581472303b..01ce7cbd3346b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -546,7 +547,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.startArray(DATA_STREAMS_FIELD.getPreferredName()); for (DataStreamInfo dataStream : dataStreams) { - dataStream.toXContent(builder, params, rolloverConfiguration, globalRetention); + dataStream.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endArray(); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 17d33ae9167fd..36fc66c67c842 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import 
org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -212,7 +213,12 @@ public Iterator toXContentChunked(ToXContent.Params outerP return builder; }), Iterators.map(indices.iterator(), explainIndexDataLifecycle -> (builder, params) -> { builder.field(explainIndexDataLifecycle.getIndex()); - explainIndexDataLifecycle.toXContent(builder, outerParams, rolloverConfiguration, globalRetention); + explainIndexDataLifecycle.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + rolloverConfiguration, + globalRetention + ); return builder; }), Iterators.single((builder, params) -> { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index 1c9dbb0575a1d..c7384e7003963 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -174,7 +174,12 @@ public XContentBuilder toXContent( builder.field(NAME_FIELD.getPreferredName(), dataStreamName); if (lifecycle != null) { builder.field(LIFECYCLE_FIELD.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention); + lifecycle.toXContent( + builder, + org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java 
b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 1f8470b3bcd01..2f307d653f8a4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -661,7 +662,7 @@ private SearchResponse buildSearchResponse( SearchResponseSections internalSearchResponse, ShardSearchFailure[] failures, String scrollId, - String searchContextId + BytesReference searchContextId ) { int numSuccess = successfulOps.get(); int numFailures = failures.length; @@ -693,7 +694,7 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); } else { final String scrollId = request.scroll() != null ? 
TransportSearchHelper.buildScrollId(queryResults) : null; - final String searchContextId; + final BytesReference searchContextId; if (buildPointInTimeFromSearchResults()) { searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java index d97cddaf533e4..c2afb8fc05c46 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ParseField; @@ -20,28 +21,29 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.Base64; public class ClosePointInTimeRequest extends ActionRequest implements ToXContentObject { private static final ParseField ID = new ParseField("id"); - private final String id; + private final BytesReference id; public ClosePointInTimeRequest(StreamInput in) throws IOException { super(in); - this.id = in.readString(); + this.id = in.readBytesReference(); } - public ClosePointInTimeRequest(String id) { + public ClosePointInTimeRequest(BytesReference id) { this.id = id; } - public String getId() { + public BytesReference getId() { return id; } @Override public ActionRequestValidationException validate() { - if (Strings.isEmpty(id)) { + if (id.length() == 0) { return ValidateActions.addValidationError("id is 
empty", null); } return null; @@ -50,7 +52,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(id); + out.writeBytesReference(id); } @Override @@ -66,21 +68,21 @@ public static ClosePointInTimeRequest fromXContent(XContentParser parser) throws throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; - String id = null; + BytesReference id = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(ID.getPreferredName())) { token = parser.nextToken(); if (token.isValue() == false) { throw new IllegalArgumentException("the request must contain only [" + ID.getPreferredName() + " field"); } - id = parser.text(); + id = new BytesArray(Base64.getUrlDecoder().decode(parser.text())); } else { throw new IllegalArgumentException( "Unknown parameter [" + parser.currentName() + "] in request body or parameter is of the wrong type[" + token + "] " ); } } - if (Strings.isNullOrEmpty(id)) { + if (id == null || id.length() == 0) { throw new IllegalArgumentException("search context id is is not provided"); } return new ClosePointInTimeRequest(id); diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index fd565ad4878bf..dafcee894c9a6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -9,34 +9,36 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContentObject; import 
org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Base64; import java.util.Objects; public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { - private final String pointInTimeId; + private final BytesReference pointInTimeId; - public OpenPointInTimeResponse(String pointInTimeId) { + public OpenPointInTimeResponse(BytesReference pointInTimeId) { this.pointInTimeId = Objects.requireNonNull(pointInTimeId, "Point in time parameter must be not null"); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(pointInTimeId); + out.writeBytesReference(pointInTimeId); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("id", pointInTimeId); + builder.field("id", Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId))); builder.endObject(); return builder; } - public String getPointInTimeId() { + public BytesReference getPointInTimeId() { return pointInTimeId; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 83a6870d72491..95d22e8a9034e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -8,15 +8,12 @@ package org.elasticsearch.action.search; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import 
org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; @@ -27,10 +24,7 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterAware; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; @@ -61,26 +55,21 @@ public boolean contains(ShardSearchContextId contextId) { return contextIds.contains(contextId); } - public static String encode( + public static BytesReference encode( List searchPhaseResults, Map aliasFilter, TransportVersion version ) { - final BytesReference bytesReference; - try (var encodedStreamOutput = new BytesStreamOutput()) { - try (var out = new OutputStreamStreamOutput(Base64.getUrlEncoder().wrap(encodedStreamOutput))) { - out.setTransportVersion(version); - TransportVersion.writeVersion(version, out); - out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); - out.writeMap(aliasFilter, StreamOutput::writeWriteable); - } - bytesReference = encodedStreamOutput.bytes(); + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(version); + TransportVersion.writeVersion(version, out); + out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + out.writeMap(aliasFilter, StreamOutput::writeWriteable); + return out.bytes(); } catch (IOException e) { assert false : e; throw new IllegalArgumentException(e); } - final BytesRef bytesRef = bytesReference.toBytesRef(); - return new String(bytesRef.bytes, bytesRef.offset, bytesRef.length, StandardCharsets.ISO_8859_1); } private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { @@ -89,11 +78,8 @@ private static void 
writeSearchPhaseResult(StreamOutput out, SearchPhaseResult s new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); } - public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, String id) { - try ( - var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); - var in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(decodedInputStream), namedWriteableRegistry) - ) { + public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, BytesReference id) { + try (var in = new NamedWriteableAwareStreamInput(id.streamInput(), namedWriteableRegistry)) { final TransportVersion version = TransportVersion.readVersion(in); in.setTransportVersion(version); final Map shards = Collections.unmodifiableMap( @@ -110,11 +96,8 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } } - public static String[] decodeIndices(String id) { - try ( - var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); - var in = new InputStreamStreamInput(decodedInputStream) - ) { + public static String[] decodeIndices(BytesReference id) { + try (var in = id.streamInput()) { final TransportVersion version = TransportVersion.readVersion(in); in.setTransportVersion(version); final Map shards = Collections.unmodifiableMap( diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 12167c8361513..6a95eadc92139 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -366,9 +366,6 @@ public ActionRequestValidationException validate() { validationException ); } - if (source.collapse() != null && source.rescores() != 
null && source.rescores().isEmpty() == false) { - validationException = addValidationError("cannot use `collapse` in conjunction with `rescore`", validationException); - } if (source.storedFields() != null) { if (source.storedFields().fetchFields() == false) { if (source.fetchSource() != null && source.fetchSource().fetchSource()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index ad1ceefbbe159..e2443566786ae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,6 +40,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Base64; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -75,7 +77,7 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private final Boolean terminatedEarly; private final int numReducePhases; private final String scrollId; - private final String pointInTimeId; + private final BytesReference pointInTimeId; private final int totalShards; private final int successfulShards; private final int skippedShards; @@ -109,7 +111,7 @@ public SearchResponse(StreamInput in) throws IOException { scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); skippedShards = in.readVInt(); - pointInTimeId = in.readOptionalString(); + pointInTimeId = in.readOptionalBytesReference(); } public SearchResponse( @@ -156,7 +158,7 @@ public SearchResponse( 
long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters, - String pointInTimeId + BytesReference pointInTimeId ) { this( searchResponseSections.hits, @@ -192,7 +194,7 @@ public SearchResponse( long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters, - String pointInTimeId + BytesReference pointInTimeId ) { this.hits = hits; hits.incRef(); @@ -349,7 +351,7 @@ public String getScrollId() { /** * Returns the encoded string of the search context that the search request is used to executed */ - public String pointInTimeId() { + public BytesReference pointInTimeId() { return pointInTimeId; } @@ -419,7 +421,10 @@ public XContentBuilder headerToXContent(XContentBuilder builder, ToXContent.Para builder.field(SCROLL_ID.getPreferredName(), scrollId); } if (pointInTimeId != null) { - builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); + builder.field( + POINT_IN_TIME_ID.getPreferredName(), + Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId)) + ); } builder.field(TOOK.getPreferredName(), tookInMillis); builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); @@ -462,7 +467,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); out.writeVInt(skippedShards); - out.writeOptionalString(pointInTimeId); + out.writeOptionalBytesReference(pointInTimeId); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 6459f6c1b458a..063dbb0397de8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.TimeValue; import java.io.IOException; +import java.util.Objects; /** * A based request for master based operation. 
@@ -22,10 +23,18 @@ public abstract class MasterNodeRequest> extends ActionRequest implements IndicesRequest { - public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); + public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMinutes(1); /** * Target shard the request should execute on. In case of index and delete requests, diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index e689492523838..51952059d7d94 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -20,7 +20,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.concurrent.TimeUnit; // TODO: This request and its associated transport action can be folded into UpdateRequest which is its only concrete production code // implementation @@ -28,7 +27,7 @@ public abstract class InstanceShardOperationRequest plugins; ESPolicy( @@ -46,18 +47,12 @@ final class ESPolicy extends Policy { PermissionCollection dynamic, Map plugins, boolean filterBadDefaults, - List dataPathPermissions + List dataPathPermissions, + List forbiddenFilePermissions ) { this.template = PolicyUtil.readPolicy(getClass().getResource(POLICY_RESOURCE), codebases); - PermissionCollection dpPermissions = null; - for (FilePermission permission : dataPathPermissions) { - if (dpPermissions == null) { - dpPermissions = permission.newPermissionCollection(); - } - dpPermissions.add(permission); - } - this.dataPathPermission = dpPermissions == null ? 
new Permissions() : dpPermissions; - this.dataPathPermission.setReadOnly(); + this.dataPathPermission = createPermission(dataPathPermissions); + this.forbiddenFilePermission = createPermission(forbiddenFilePermissions); this.untrusted = PolicyUtil.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), Collections.emptyMap()); if (filterBadDefaults) { this.system = new SystemPolicy(Policy.getPolicy()); @@ -68,6 +63,21 @@ final class ESPolicy extends Policy { this.plugins = plugins; } + private static PermissionCollection createPermission(List permissions) { + PermissionCollection coll = null; + for (FilePermission permission : permissions) { + if (coll == null) { + coll = permission.newPermissionCollection(); + } + coll.add(permission); + } + if (coll == null) { + coll = new Permissions(); + } + coll.setReadOnly(); + return coll; + } + @Override @SuppressForbidden(reason = "fast equals check is desired") public boolean implies(ProtectionDomain domain, Permission permission) { @@ -77,9 +87,12 @@ public boolean implies(ProtectionDomain domain, Permission permission) { return false; } + // completely deny access to specific files that are forbidden + if (forbiddenFilePermission.implies(permission)) { + return false; + } + URL location = codeSource.getLocation(); - // location can be null... ??? nobody knows - // https://bugs.openjdk.java.net/browse/JDK-8129972 if (location != null) { // run scripts with limited permissions if (BootstrapInfo.UNTRUSTED_CODEBASE.equals(location.getFile())) { @@ -93,17 +106,16 @@ public boolean implies(ProtectionDomain domain, Permission permission) { } } - if (permission instanceof FilePermission) { - // The FilePermission to check access to the path.data is the hottest permission check in - // Elasticsearch, so we check it first. 
- if (dataPathPermission.implies(permission)) { - return true; - } - // Special handling for broken Hadoop code: "let me execute or my classes will not load" - // yeah right, REMOVE THIS when hadoop is fixed - if ("<>".equals(permission.getName())) { - hadoopHack(); - } + // The FilePermission to check access to the path.data is the hottest permission check in + // Elasticsearch, so we explicitly check it here. + if (dataPathPermission.implies(permission)) { + return true; + } + + // Special handling for broken Hadoop code: "let me execute or my classes will not load" + // yeah right, REMOVE THIS when hadoop is fixed + if (permission instanceof FilePermission && "<>".equals(permission.getName())) { + hadoopHack(); } // otherwise defer to template + dynamic file permissions diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index eef7228bb4812..1c37b3492c4cb 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -125,7 +125,8 @@ static void configure(Environment environment, boolean filterBadDefaults, Path p createPermissions(environment, pidFile), getPluginAndModulePermissions(environment), filterBadDefaults, - createRecursiveDataPathPermission(environment) + createRecursiveDataPathPermission(environment), + createForbiddenFilePermissions(environment) ) ); @@ -188,6 +189,18 @@ private static List createRecursiveDataPathPermission(Environmen return toFilePermissions(policy); } + private static List createForbiddenFilePermissions(Environment environment) throws IOException { + Permissions policy = new Permissions(); + addSingleFilePath(policy, environment.configFile().resolve("elasticsearch.yml"), "read,readlink,write,delete,execute"); + addSingleFilePath(policy, environment.configFile().resolve("jvm.options"), "read,readlink,write,delete,execute"); + Path jvmOptionsD = 
environment.configFile().resolve("jvm.options.d"); + if (Files.isDirectory(jvmOptionsD)) { + // we don't want to create this if it doesn't exist + addDirectoryPath(policy, "forbidden_access", jvmOptionsD, "read,readlink,write,delete,execute", false); + } + return toFilePermissions(policy); + } + /** Adds access to classpath jars/classes for jar hell scan, etc */ @SuppressForbidden(reason = "accesses fully qualified URLs to configure security") static void addClasspathPermissions(Permissions policy) throws IOException { @@ -219,6 +232,7 @@ static void addFilePermissions(Permissions policy, Environment environment, Path addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink", false); addDirectoryPath(policy, "path.conf", environment.configFile(), "read,readlink", false); + // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete", false); addDirectoryPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete", false); @@ -251,6 +265,7 @@ static void addFilePermissions(Permissions policy, Environment environment, Path for (Path path : environment.repoFiles()) { addDirectoryPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete", false); } + if (pidFile != null) { // we just need permission to remove the file if its elsewhere. 
addSingleFilePath(policy, pidFile, "delete"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 9e23ffed6e8c5..9c89945046126 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,11 +24,13 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -355,6 +357,17 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE return PARSER.parse(parser, null); } + /** + * Adds a retention param to signal that this serialisation should include the effective retention metadata + */ + public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { + boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); + return new DelegatingMapParams( + Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), + params + ); + } + public static Builder newBuilder(DataStreamLifecycle lifecycle) { return new Builder().dataRetention(lifecycle.getDataRetention()) .downsampling(lifecycle.getDownsampling()) diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java 
b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index c4952b8cae51d..86560b8a58963 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -1405,9 +1405,15 @@ protected static void throwEOF(int bytesToRead, int bytesAvailable) throws EOFEx * Read a {@link TimeValue} from the stream */ public TimeValue readTimeValue() throws IOException { - long duration = readZLong(); - TimeUnit timeUnit = TIME_UNITS[readByte()]; - return new TimeValue(duration, timeUnit); + final long duration = readZLong(); + final TimeUnit timeUnit = TIME_UNITS[readByte()]; + return switch (timeUnit) { + // avoid unnecessary allocation for some common cases: + case MILLISECONDS -> TimeValue.timeValueMillis(duration); + case SECONDS -> TimeValue.timeValueSeconds(duration); + case MINUTES -> TimeValue.timeValueMinutes(duration); + default -> new TimeValue(duration, timeUnit); + }; } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6edd43683519e..78f07c8a137b9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -104,6 +104,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, Version.LUCENE_9_10_0); public static final IndexVersion TIME_SERIES_ROUTING_HASH_IN_ID = def(8_504_00_0, Version.LUCENE_9_10_0); public static final IndexVersion DEFAULT_DENSE_VECTOR_TO_INT8_HNSW = def(8_505_00_0, Version.LUCENE_9_10_0); + public static final IndexVersion DOC_VALUES_FOR_IGNORED_META_FIELD = def(8_505_00_1, Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index d3e281ca115e1..e4b86876c99d3 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -63,6 +63,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessControlException; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -246,16 +247,17 @@ public static List getWordList( try { return loadWordList(path, removeComments); } catch (CharacterCodingException ex) { - String message = String.format( - Locale.ROOT, + String message = Strings.format( "Unsupported character encoding detected while reading %s: %s - files must be UTF-8 encoded", settingPath, - path.toString() + path ); throw new IllegalArgumentException(message, ex); } catch (IOException ioe) { - String message = String.format(Locale.ROOT, "IOException while reading %s: %s", settingPath, path.toString()); + String message = Strings.format("IOException while reading %s: %s", settingPath, path); throw new IllegalArgumentException(message, ioe); + } catch (AccessControlException ace) { + throw new IllegalArgumentException(Strings.format("Access denied trying to read file %s: %s", settingPath, path), ace); } } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3758858a5b10a..3e191d0ab1e25 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.get; +import org.apache.lucene.index.SortedSetDocValues; import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -17,10 +18,13 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; @@ -35,6 +39,8 @@ import org.elasticsearch.search.lookup.Source; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -308,6 +314,7 @@ private GetResult innerGetFetch( } // put stored fields into result objects + final IndexVersion indexVersion = indexSettings.getIndexVersionCreated(); if (leafStoredFieldLoader.storedFields().isEmpty() == false) { Set needed = new HashSet<>(); if (storedFields != null) { @@ -320,6 +327,10 @@ private GetResult innerGetFetch( if (false == needed.contains(entry.getKey())) { continue; } + if (IgnoredFieldMapper.NAME.equals(entry.getKey()) + && indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + continue; + } MappedFieldType ft = mapperService.fieldType(entry.getKey()); if (ft == null) { continue; // user asked for a non-existent field, ignore it @@ -333,6 +344,21 @@ private GetResult innerGetFetch( } } + // NOTE: when _ignored is requested via `stored_fields` we need to load it from doc values instead of loading it from stored fields. 
+ // The _ignored field used to be stored, but as a result of supporting aggregations on it, it moved from using a stored field to + // using doc values. + if (indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) + && storedFields != null + && Arrays.asList(storedFields).contains(IgnoredFieldMapper.NAME)) { + final DocumentField ignoredDocumentField = loadIgnoredMetadataField(docIdAndVersion); + if (ignoredDocumentField != null) { + if (metadataFields == null) { + metadataFields = new HashMap<>(); + } + metadataFields.put(IgnoredFieldMapper.NAME, ignoredDocumentField); + } + } + BytesReference sourceBytes = null; if (mapperService.mappingLookup().isSourceEnabled() && fetchSourceContext.fetchSource()) { Source source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }) @@ -357,6 +383,22 @@ private GetResult innerGetFetch( ); } + private static DocumentField loadIgnoredMetadataField(final DocIdAndVersion docIdAndVersion) throws IOException { + final SortedSetDocValues ignoredDocValues = docIdAndVersion.reader.getContext() + .reader() + .getSortedSetDocValues(IgnoredFieldMapper.NAME); + if (ignoredDocValues == null + || ignoredDocValues.advanceExact(docIdAndVersion.docId) == false + || ignoredDocValues.docValueCount() <= 0) { + return null; + } + final List ignoredValues = new ArrayList<>(ignoredDocValues.docValueCount()); + for (int i = 0; i < ignoredDocValues.docValueCount(); i++) { + ignoredValues.add(ignoredDocValues.lookupOrd(ignoredDocValues.nextOrd()).utf8ToString()); + } + return new DocumentField(IgnoredFieldMapper.NAME, ignoredValues); + } + private static StoredFieldLoader buildStoredFieldLoader(String[] fields, FetchSourceContext fetchSourceContext, SourceLoader loader) { Set fieldsToLoad = new HashSet<>(); if (fields != null && fields.length > 0) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java 
index 4347bcfd8be3b..7da7992f9a9ca 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -9,10 +9,21 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.KeywordDocValuesField; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.util.Collections; @@ -30,14 +41,20 @@ public static class Defaults { } public static final IgnoredFieldType FIELD_TYPE = new IgnoredFieldType(); + private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(FIELD_TYPE); - private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(); + public static final LegacyIgnoredFieldType LEGACY_FIELD_TYPE = new LegacyIgnoredFieldType(); + private static final IgnoredFieldMapper LEGACY_INSTANCE = new IgnoredFieldMapper(LEGACY_FIELD_TYPE); - public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); + public static final TypeParser PARSER = new FixedTypeParser(c -> getInstance(c.indexVersionCreated())); - public static final class IgnoredFieldType extends StringFieldType { + private static MetadataFieldMapper getInstance(IndexVersion indexVersion) { + return 
indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) ? INSTANCE : LEGACY_INSTANCE; + } - private IgnoredFieldType() { + public static final class LegacyIgnoredFieldType extends StringFieldType { + + private LegacyIgnoredFieldType() { super(NAME, true, true, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); } @@ -46,6 +63,11 @@ public String typeName() { return CONTENT_TYPE; } + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new StoredValueFetcher(context.lookup(), NAME); + } + @Override public Query existsQuery(SearchExecutionContext context) { // This query is not performance sensitive, it only helps assess @@ -54,21 +76,53 @@ public Query existsQuery(SearchExecutionContext context) { // field is bounded by the number of fields in the mappings. return new TermRangeQuery(name(), null, null, true, true); } + } + + public static final class IgnoredFieldType extends StringFieldType { + + private IgnoredFieldType() { + super(NAME, true, false, true, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - return new StoredValueFetcher(context.lookup(), NAME); + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this, FielddataOperation.SEARCH)); + } + + public Query existsQuery(SearchExecutionContext context) { + return new FieldExistsQuery(name()); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + CoreValuesSourceType.KEYWORD, + (dv, n) -> new KeywordDocValuesField(FieldData.toString(dv), n) + ); } } - private IgnoredFieldMapper() { - super(FIELD_TYPE); + private IgnoredFieldMapper(StringFieldType fieldType) { + super(fieldType); } @Override public void 
postParse(DocumentParserContext context) { - for (String field : context.getIgnoredFields()) { - context.doc().add(new StringField(NAME, field, Field.Store.YES)); + if (context.indexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new SortedSetDocValuesField(NAME, new BytesRef(ignoredField))); + context.doc().add(new StringField(NAME, ignoredField, Field.Store.NO)); + } + } else { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new StringField(NAME, ignoredField, Field.Store.YES)); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index ebb6672cbab18..493d09a047a53 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1709,10 +1709,6 @@ public Function pointReaderIfPossible() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { - // Counters are not supported by ESQL so we load them in null - return BlockLoader.CONSTANT_NULLS; - } if (hasDocValues()) { return type.blockLoaderFromDocValues(name()); } diff --git a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index 4aa82f5e4b7c5..d33fa8ca8cebf 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.TimeValue; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -256,4 +257,27 @@ public static Optional 
extractTraceId(String traceparent) { return traceparent != null && traceparent.length() >= 55 ? Optional.of(traceparent.substring(3, 35)) : Optional.empty(); } + /** + * The name of the common {@code ?master_timeout} query parameter. + */ + public static final String REST_MASTER_TIMEOUT_PARAM = "master_timeout"; + + /** + * The default value for the common {@code ?master_timeout} query parameter. + */ + public static final TimeValue REST_MASTER_TIMEOUT_DEFAULT = TimeValue.timeValueSeconds(30); + + /** + * Extract the {@code ?master_timeout} parameter from the request, imposing the common default of {@code 30s} in case the parameter is + * missing. + * + * @param restRequest The request from which to extract the {@code ?master_timeout} parameter + * @return the timeout from the request, with a default of {@link #REST_MASTER_TIMEOUT_DEFAULT} ({@code 30s}) if the request does not + * specify the parameter + */ + public static TimeValue getMasterNodeTimeout(RestRequest restRequest) { + assert restRequest != null; + return restRequest.paramAsTime(REST_MASTER_TIMEOUT_PARAM, REST_MASTER_TIMEOUT_DEFAULT); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java index 8c8624f1766b1..74ecc85e960b5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestAddVotingConfigExclusionAction extends BaseRestHandler { private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(30L); @@ -82,7 +83,7 @@ static AddVotingConfigExclusionsRequest 
resolveVotingConfigExclusionsRequest(fin request.paramAsTime("timeout", DEFAULT_TIMEOUT) ); - return resolvedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resolvedRequest.masterNodeTimeout())); + return resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java index 534bb391e9ffe..7ef5b444304cf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cleans up a repository @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String name = request.param("repository"); CleanupRepositoryRequest cleanupRepositoryRequest = new CleanupRepositoryRequest(name); cleanupRepositoryRequest.ackTimeout(request.paramAsTime("timeout", cleanupRepositoryRequest.ackTimeout())); - cleanupRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cleanupRepositoryRequest.masterNodeTimeout())); + cleanupRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java index 69b51afb8d257..ff26648476926 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestClearVotingConfigExclusionsAction extends BaseRestHandler { @@ -45,7 +46,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest request, final No static ClearVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(final RestRequest request) { final var resolvedRequest = new ClearVotingConfigExclusionsRequest(); - resolvedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resolvedRequest.masterNodeTimeout())); + resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); resolvedRequest.setTimeout(resolvedRequest.masterNodeTimeout()); resolvedRequest.setWaitForRemoval(request.paramAsBoolean("wait_for_removal", resolvedRequest.getWaitForRemoval())); return resolvedRequest; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index 7785680a3ca8d..b6b63a6774667 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -24,6 +24,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Clones indices from one snapshot into another snapshot in the same repository @@ -51,7 +52,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.param("target_snapshot"), 
XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) ); - cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); + cloneSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 7748944306e35..2d2d241c35086 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -29,6 +29,7 @@ import java.util.function.Predicate; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterGetSettingsAction extends BaseRestHandler { @@ -64,7 +65,7 @@ public String getName() { private static void setUpRequestParams(MasterNodeReadRequest clusterRequest, RestRequest request) { clusterRequest.local(request.paramAsBoolean("local", clusterRequest.local())); - clusterRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRequest.masterNodeTimeout())); + clusterRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index 6518ccc6e0c94..fe0c005046900 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -31,6 +31,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterHealthAction extends BaseRestHandler { @@ -63,8 +64,12 @@ public static ClusterHealthRequest fromRequest(final RestRequest request) { final ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(indices); clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions())); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); + if (request.hasParam("master_timeout")) { + clusterHealthRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + } else { + clusterHealthRequest.masterNodeTimeout(clusterHealthRequest.timeout()); + } String waitForStatus = request.param("wait_for_status"); if (waitForStatus != null) { clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index a42882d1144c2..fee4cce3e7c3f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -31,6 +31,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import 
static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterRerouteAction extends BaseRestHandler { @@ -98,7 +99,7 @@ public static ClusterRerouteRequest createRequest(RestRequest request) throws IO clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); clusterRerouteRequest.ackTimeout(request.paramAsTime("timeout", clusterRerouteRequest.ackTimeout())); clusterRerouteRequest.setRetryFailed(request.paramAsBoolean("retry_failed", clusterRerouteRequest.isRetryFailed())); - clusterRerouteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRerouteRequest.masterNodeTimeout())); + clusterRerouteRequest.masterNodeTimeout(getMasterNodeTimeout(request)); request.applyContentParser(parser -> PARSER.parse(parser, clusterRerouteRequest, null)); return clusterRerouteRequest; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java index 72bea78e0103b..b3fb5ccfdfddf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -42,6 +42,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterStateAction extends BaseRestHandler { @@ -81,7 +82,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); 
clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (request.hasParam("wait_for_metadata_version")) { clusterStateRequest.waitForMetadataVersion(request.paramAsLong("wait_for_metadata_version", 0)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index cf22e403e1def..9f34ff5087094 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterUpdateSettingsAction extends BaseRestHandler { @@ -45,9 +46,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.ackTimeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.ackTimeout())); - clusterUpdateSettingsRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout()) - ); + clusterUpdateSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); Map source; try (XContentParser parser = request.contentParser()) { source = parser.map(); diff 
--git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index 3e2543230ab06..9491ecfcc1115 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -21,6 +21,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Creates a new snapshot @@ -44,7 +45,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String snapshot = request.param("snapshot"); CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); request.applyContentParser(p -> createSnapshotRequest.source(p.mapOrdered())); - createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); + createSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java index 4ecd784ecd37c..18045828f4401 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestDeleteDesiredNodesAction extends BaseRestHandler { @Override public String getName() { @@ -32,7 +34,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final AcknowledgedRequest.Plain deleteDesiredNodesRequest = new AcknowledgedRequest.Plain(); - deleteDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteDesiredNodesRequest.masterNodeTimeout())); + deleteDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( TransportDeleteDesiredNodesAction.TYPE, deleteDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 3dc979f295530..a3ecaf3127c44 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Unregisters a repository @@ -45,7 +46,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String name = request.param("repository"); DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(name); deleteRepositoryRequest.ackTimeout(request.paramAsTime("timeout", deleteRepositoryRequest.ackTimeout())); - deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); + deleteRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() .deleteRepository( diff 
--git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index bedd44cf9f559..ad7bdc8a2c9b0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Deletes a snapshot @@ -43,7 +44,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String repository = request.param("repository"); String[] snapshots = Strings.splitStringByCommaToArray(request.param("snapshot")); DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repository, snapshots); - deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); + deleteSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java index ca6a9b5fe1f22..46d48b90d283e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class 
RestDeleteStoredScriptAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client String id = request.param("id"); DeleteStoredScriptRequest deleteStoredScriptRequest = new DeleteStoredScriptRequest(id); deleteStoredScriptRequest.ackTimeout(request.paramAsTime("timeout", deleteStoredScriptRequest.ackTimeout())); - deleteStoredScriptRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteStoredScriptRequest.masterNodeTimeout())); + deleteStoredScriptRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java index 869c34896c936..ae375309c301f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java @@ -17,6 +17,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestGetDesiredNodesAction extends BaseRestHandler { @Override public String getName() { @@ -31,7 +33,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetDesiredNodesAction.Request getDesiredNodesRequest = new GetDesiredNodesAction.Request(); - getDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getDesiredNodesRequest.masterNodeTimeout())); + getDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( GetDesiredNodesAction.INSTANCE, getDesiredNodesRequest, diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java index 9e53a76cdb131..13fbf3504ebc0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + /** * Endpoint for getting the system feature upgrade status */ @@ -41,7 +43,7 @@ public boolean allowSystemIndexAccessByDefault() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetFeatureUpgradeStatusRequest req = new GetFeatureUpgradeStatusRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> { client.execute(GetFeatureUpgradeStatusAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index e3a7f2da79a11..c2d4484f1e098 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Returns repository information @@ -51,7 +52,7 @@ public List routes() { public RestChannelConsumer prepareRequest(final RestRequest request, final 
NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(repositories); - getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); return channel -> client.admin().cluster().getRepositories(getRepositoriesRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 7482ae7683b4a..45913b9b3ce2a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -25,6 +25,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.snapshots.SnapshotInfo.INCLUDE_REPOSITORY_XCONTENT_PARAM; import static org.elasticsearch.snapshots.SnapshotInfo.INDEX_DETAILS_XCONTENT_PARAM; import static org.elasticsearch.snapshots.SnapshotInfo.INDEX_NAMES_XCONTENT_PARAM; @@ -80,7 +81,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final SortOrder order = SortOrder.fromString(request.param("order", getSnapshotsRequest.order().toString())); getSnapshotsRequest.order(order); getSnapshotsRequest.includeIndexNames(request.paramAsBoolean(INDEX_NAMES_XCONTENT_PARAM, getSnapshotsRequest.includeIndexNames())); - 
getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java index b0d5bce981f2a..f827b07ebe96c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetStoredScriptAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { String id = request.param("id"); GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() .getStoredScript(getRequest, new RestToXContentListener<>(channel, GetStoredScriptResponse::status)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index e5745ec89533c..c38f5effc385a 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPendingClusterTasksAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java index ba65ab54c440c..cb9af32955abb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + /** * Endpoint for triggering a system feature upgrade */ @@ -41,7 +43,7 @@ public boolean allowSystemIndexAccessByDefault() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final 
PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> { client.execute(PostFeatureUpgradeAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java index 01b404e02f0a6..119f6660f2a33 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPrevalidateNodeRemovalAction extends BaseRestHandler { @@ -43,8 +44,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli .setIds(ids) .setExternalIds(externalIds) .build(); - prevalidationRequest.masterNodeTimeout(request.paramAsTime("master_timeout", prevalidationRequest.masterNodeTimeout())); prevalidationRequest.timeout(request.paramAsTime("timeout", prevalidationRequest.timeout())); + if (request.hasParam("master_timeout")) { + prevalidationRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + } else { + prevalidationRequest.masterNodeTimeout(prevalidationRequest.timeout()); + } return channel -> client.execute( PrevalidateNodeRemovalAction.INSTANCE, prevalidationRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index 1fe1f8da2e5c8..385fc6c19143a 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Registers repositories @@ -50,7 +51,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRepositoryRequest.source(parser.mapOrdered()); } putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); - putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); + putRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRepositoryRequest.ackTimeout(request.paramAsTime("timeout", putRepositoryRequest.ackTimeout())); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index 984882edcffaa..ce7052d02cb64 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -23,6 +23,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutStoredScriptAction extends BaseRestHandler { @@ -51,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client StoredScriptSource source = StoredScriptSource.parse(content, xContentType); PutStoredScriptRequest putRequest = new PutStoredScriptRequest(id, 
context, content, request.getXContentType(), source); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.ackTimeout(request.paramAsTime("timeout", putRequest.ackTimeout())); return channel -> client.admin().cluster().putStoredScript(putRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index b6fba8dd1054c..06524a040db36 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Restores a snapshot @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String repository = request.param("repository"); String snapshot = request.param("snapshot"); RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout())); + restoreSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); request.applyContentParser(p -> restoreSnapshotRequest.source(p.mapOrdered())); return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 3baebb25c4dc2..33b4ba04b826e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Returns status of currently running snapshot @@ -53,7 +54,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository).snapshots(snapshots); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); - snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); + snapshotsStatusRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .snapshotsStatus(snapshotsStatusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java index 8c88e51a2c045..b36c4ac56ae71 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public 
class RestSnapshottableFeaturesAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetSnapshottableFeaturesRequest req = new GetSnapshottableFeaturesRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> { client.execute(SnapshottableFeaturesAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index 07c54fd258845..38b191ba9f006 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestUpdateDesiredNodesAction extends BaseRestHandler { private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateDesiredNodesAction.class); @@ -66,7 +68,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } } - updateDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateDesiredNodesRequest.masterNodeTimeout())); + updateDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 1eda532439e19..70df369ef9bff 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestVerifyRepositoryAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String name = request.param("repository"); VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(name); - verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); + verifyRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); verifyRepositoryRequest.ackTimeout(request.paramAsTime("timeout", verifyRepositoryRequest.ackTimeout())); return channel -> client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 6fbfaa11b83e0..7b97d88f3f85b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -20,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import 
static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteDanglingIndexAction extends BaseRestHandler { @@ -41,7 +42,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); deleteRequest.ackTimeout(request.paramAsTime("timeout", deleteRequest.ackTimeout())); - deleteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRequest.masterNodeTimeout())); + deleteRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( TransportDeleteDanglingIndexAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index b20eac028ba02..00af47fea8dc9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -20,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestImportDanglingIndexAction extends BaseRestHandler { @Override @@ -40,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); importRequest.ackTimeout(request.paramAsTime("timeout", importRequest.ackTimeout())); - importRequest.masterNodeTimeout(request.paramAsTime("master_timeout", importRequest.masterNodeTimeout())); + importRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( TransportImportDanglingIndexAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java 
index 9498541d5a305..4031de3720333 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.rest.Scope.PUBLIC; @ServerlessScope(PUBLIC) @@ -43,7 +44,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndexMetadata.APIBlock.fromName(request.param("block")), Strings.splitStringByCommaToArray(request.param("index")) ); - addIndexBlockRequest.masterNodeTimeout(request.paramAsTime("master_timeout", addIndexBlockRequest.masterNodeTimeout())); + addIndexBlockRequest.masterNodeTimeout(getMasterNodeTimeout(request)); addIndexBlockRequest.ackTimeout(request.paramAsTime("timeout", addIndexBlockRequest.ackTimeout())); addIndexBlockRequest.indicesOptions(IndicesOptions.fromRequest(request, addIndexBlockRequest.indicesOptions())); return channel -> client.admin().indices().addBlock(addIndexBlockRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java index ea906955785b4..f79aefde8e14a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -27,6 +27,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestCloseIndexAction extends BaseRestHandler { @@ -47,7 +48,7 @@ public String getName() { @UpdateForV9 public 
RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); + closeIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); closeIndexRequest.ackTimeout(request.paramAsTime("timeout", closeIndexRequest.ackTimeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 37106059b7b9e..5f0e3391b762a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -31,6 +31,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestCreateIndexAction extends BaseRestHandler { @@ -77,7 +78,7 @@ static CreateIndexRequest prepareRequestV7(RestRequest request) { } createIndexRequest.ackTimeout(request.paramAsTime("timeout", createIndexRequest.ackTimeout())); - createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); + createIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return createIndexRequest; } @@ -116,7 +117,7 @@ static CreateIndexRequest prepareRequest(RestRequest 
request) { } createIndexRequest.ackTimeout(request.paramAsTime("timeout", createIndexRequest.ackTimeout())); - createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); + createIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return createIndexRequest; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java index 733e35ba7f927..14375f2e0a483 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteComponentTemplateAction extends BaseRestHandler { @@ -39,7 +40,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] names = Strings.splitStringByCommaToArray(request.param("name")); TransportDeleteComponentTemplateAction.Request deleteReq = new TransportDeleteComponentTemplateAction.Request(names); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(TransportDeleteComponentTemplateAction.TYPE, deleteReq, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java index 8c84fb054718e..2ee1fc0a18dee 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteComposableIndexTemplateAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String[] names = Strings.splitStringByCommaToArray(request.param("name")); TransportDeleteComposableIndexTemplateAction.Request deleteReq = new TransportDeleteComposableIndexTemplateAction.Request(names); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( TransportDeleteComposableIndexTemplateAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java index b39cd6ca0ded1..1e0b2c8441fcd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteIndexAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public String getName() { public RestChannelConsumer 
prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteIndexRequest.ackTimeout(request.paramAsTime("timeout", deleteIndexRequest.ackTimeout())); - deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); + deleteIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); return channel -> client.admin().indices().delete(deleteIndexRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java index 4ba54206f40fc..3c2ff2777f504 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteIndexTemplateAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); - deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); + deleteIndexTemplateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> 
client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java index 84e7865d9f699..867466ffb4052 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetComponentTemplateAction extends BaseRestHandler { @@ -49,7 +50,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetComponentTemplateAction.Request getRequest = new GetComponentTemplateAction.Request(request.param("name")); getRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java index 0981d5820131e..d2349cb9126a1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetComposableIndexTemplateAction extends BaseRestHandler { @@ -48,7 +49,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetComposableIndexTemplateAction.Request getRequest = new GetComposableIndexTemplateAction.Request(request.param("name")); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java index 2efcfe3bef119..76252c8936c82 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java @@ -27,6 +27,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * The REST handler for get template and head template APIs. 
@@ -59,7 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); - getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); + getIndexTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final boolean implicitAll = getIndexTemplatesRequest.names().length == 0; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index db10bdd985d59..0ad3eff5cb6d1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -29,6 +29,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * The REST handler for get index and head index APIs. 
@@ -65,7 +66,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getIndexRequest.indices(indices); getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); + getIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); getIndexRequest.features(GetIndexRequest.Feature.fromRequest(request)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 065399076c12a..66c7c357e790e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -28,6 +28,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetMappingAction extends BaseRestHandler { @@ -84,7 +85,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); getMappingsRequest.indices(indices); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); - final TimeValue timeout = request.paramAsTime("master_timeout", getMappingsRequest.masterNodeTimeout()); + final TimeValue timeout = getMasterNodeTimeout(request); getMappingsRequest.masterNodeTimeout(timeout); 
getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local())); final HttpChannel httpChannel = request.getHttpChannel(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index af72e66f6127d..96384d7e86a97 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetSettingsAction extends BaseRestHandler { @@ -55,7 +56,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .includeDefaults(renderDefaults) .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); - getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); + getSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java index 67de902d50e91..348ec87ed0747 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndexDeleteAliasesAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases)); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java index 7395f00d733b1..93eac4c448522 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java @@ -24,6 +24,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndexPutAliasAction extends BaseRestHandler { @@ -99,7 +100,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + 
indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); IndicesAliasesRequest.AliasActions aliasAction = AliasActions.add().indices(indices).alias(alias); if (routing != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java index e603d7647966f..b6a407942f629 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndicesAliasesAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); try (XContentParser parser = request.contentParser()) { IndicesAliasesRequest.PARSER.parse(parser, indicesAliasesRequest, null); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java index 4e00bbb5966b8..3c95ff8a17d7d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestOpenIndexAction extends BaseRestHandler { @@ -41,7 +42,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); openIndexRequest.ackTimeout(request.paramAsTime("timeout", openIndexRequest.ackTimeout())); - openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); + openIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java index fd6f529d876a2..4762e6d09f100 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java @@ -22,6 +22,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutComponentTemplateAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final 
RestRequest request, final NodeClient client) throws IOException { PutComponentTemplateAction.Request putRequest = new PutComponentTemplateAction.Request(request.param("name")); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); try (var parser = request.contentParser()) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java index 3171c18bc9e28..4b94691f83b7d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java @@ -22,6 +22,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutComposableIndexTemplateAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC TransportPutComposableIndexTemplateAction.Request putRequest = new TransportPutComposableIndexTemplateAction.Request( request.param("name") ); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); try (var parser = request.contentParser()) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index 74ca5e9d3921b..44df58b95292d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -26,6 +26,7 @@ import static java.util.Arrays.asList; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutIndexTemplateAction extends BaseRestHandler { @@ -62,7 +63,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.patterns(asList(request.paramAsStringArray("index_patterns", Strings.EMPTY_ARRAY))); } putRequest.order(request.paramAsInt("order", putRequest.order())); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 5d4d913767fe4..6ee90db500eaf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -29,6 +29,7 @@ import static org.elasticsearch.index.mapper.MapperService.isMappingSourceTyped; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutMappingAction extends 
BaseRestHandler { @@ -91,7 +92,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } putMappingRequest.ackTimeout(request.paramAsTime("timeout", putMappingRequest.ackTimeout())); - putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); + putMappingRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); putMappingRequest.writeIndexOnly(request.paramAsBoolean("write_index_only", false)); return channel -> client.admin().indices().putMapping(putMappingRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index c72508a9bf646..d6c1ff4b71108 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -24,6 +24,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public abstract class RestResizeHandler extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestResizeHandler.class); @@ -50,7 +51,7 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final resizeRequest.setResizeType(getResizeType()); request.applyContentParser(resizeRequest::fromXContent); resizeRequest.ackTimeout(request.paramAsTime("timeout", resizeRequest.ackTimeout())); - resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + resizeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); 
resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 59c37c2c015a6..4d39e44018055 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -26,6 +26,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestRolloverIndexAction extends BaseRestHandler { @@ -52,7 +53,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC rolloverIndexRequest.dryRun(request.paramAsBoolean("dry_run", false)); rolloverIndexRequest.lazy(request.paramAsBoolean("lazy", false)); rolloverIndexRequest.ackTimeout(request.paramAsTime("timeout", rolloverIndexRequest.ackTimeout())); - rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout())); + rolloverIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (DataStream.isFailureStoreFeatureFlagEnabled()) { boolean failureStore = request.paramAsBoolean("target_failure_store", false); if (failureStore) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java index 7eab7168cd100..c8b30765ab2c7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestSimulateIndexTemplateAction extends BaseRestHandler { @@ -40,9 +41,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SimulateIndexTemplateRequest simulateIndexTemplateRequest = new SimulateIndexTemplateRequest(request.param("name")); - simulateIndexTemplateRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", simulateIndexTemplateRequest.masterNodeTimeout()) - ); + simulateIndexTemplateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); simulateIndexTemplateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); if (request.hasContent()) { TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java index bc38d549926af..53ab068c86695 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestSimulateTemplateAction extends BaseRestHandler { @@ -52,7 +53,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli 
simulateRequest.indexTemplateRequest(indexTemplateRequest); } - simulateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", simulateRequest.masterNodeTimeout())); + simulateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(SimulateTemplateAction.INSTANCE, simulateRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 222a22e5da3e3..af63bd23ef843 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestUpdateSettingsAction extends BaseRestHandler { @@ -44,7 +45,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); updateSettingsRequest.ackTimeout(request.paramAsTime("timeout", updateSettingsRequest.ackTimeout())); updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); - updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); + updateSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); try (var parser = request.contentParser()) { diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 570fb0ebc7c77..806e3939b6d1e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -34,6 +34,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestAllocationAction extends AbstractCatAction { @@ -61,7 +62,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().routingTable(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java index 4a238451bcc69..4faf44ff8c5a7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java @@ -34,6 +34,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * cat API class for handling get componentTemplate. 
@@ -76,7 +77,7 @@ protected BaseRestHandler.RestChannelConsumer doCatRequest(RestRequest request, final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().metadata(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse clusterStateResponse) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index ccfcb9b505e92..ca3bcfbcd38e0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -44,9 +44,9 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.action.support.master.MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndicesAction extends AbstractCatAction { @@ -79,7 +79,7 @@ protected void documentation(StringBuilder sb) { public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.strictExpand()); - final TimeValue masterNodeTimeout = request.paramAsTime("master_timeout", 
DEFAULT_MASTER_NODE_TIMEOUT); + final TimeValue masterNodeTimeout = getMasterNodeTimeout(request); final boolean includeUnloadedSegments = request.paramAsBoolean("include_unloaded_segments", false); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java index 109fd026502c9..b0805bf423453 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestMasterAction extends AbstractCatAction { @@ -47,7 +48,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java index 9a032ce064cf6..83e6ea35ec520 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java @@ -31,6 +31,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestNodeAttrsAction extends AbstractCatAction { @@ -55,7 +56,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index e5e0f9ee926f3..2c1f57f291969 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -60,6 +60,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestNodesAction extends AbstractCatAction { @@ -86,7 +87,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear() diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java index 19ebbd2f19df4..5ed0cd722d5db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPendingClusterTasksAction extends AbstractCatAction { @@ -45,7 +46,7 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java index 7aba2c8e38a6d..0e459b53d203c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPluginsAction extends AbstractCatAction { @@ 
-56,7 +57,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index 4e32a3635872f..5744923b86d6c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cat API class to display information about snapshot repositories @@ -38,7 +39,7 @@ public List routes() { protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); - getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java index 
dda03d515e828..1f11a662c0abf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java @@ -33,6 +33,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestSegmentsAction extends AbstractCatAction { @@ -58,7 +59,7 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, final Node final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index e2fb0573d724a..50389744e1129 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -53,6 +53,7 @@ import java.util.function.Function; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestShardsAction extends AbstractCatAction { @@ -83,7 +84,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final var clusterStateRequest = new 
ClusterStateRequest(); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices).indicesOptions(IndicesOptions.strictExpandHidden()); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 9b4c6534a452f..0ff44e37698d9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cat API class to display information about snapshots @@ -56,7 +57,7 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); - getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<>(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java index 929de981ce146..849e2d68cb2dc 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java @@ -29,6 +29,7 @@ import java.util.Map; import static 
org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestTemplatesAction extends AbstractCatAction { @@ -56,15 +57,13 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient ? new GetIndexTemplatesRequest() : new GetIndexTemplatesRequest(matchPattern); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); - getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); + getIndexTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final GetComposableIndexTemplateAction.Request getComposableTemplatesRequest = new GetComposableIndexTemplateAction.Request( matchPattern ); getComposableTemplatesRequest.local(request.paramAsBoolean("local", getComposableTemplatesRequest.local())); - getComposableTemplatesRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", getComposableTemplatesRequest.masterNodeTimeout()) - ); + getComposableTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 9ca0dae8c8740..260ce4a3aeb3d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -45,6 +45,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestThreadPoolAction extends AbstractCatAction { @@ -72,7 +73,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli 
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java index 3049b9096004e..bf78612ccf5a9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeletePipelineAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); return channel -> client.admin().cluster().deletePipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java index 
e87a78c6b658e..d6712b44f3e03 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetPipelineAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl restRequest.paramAsBoolean("summary", false), Strings.splitStringByCommaToArray(restRequest.param("id")) ); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.admin().cluster().getPipeline(request, new RestToXContentListener<>(channel, GetPipelineResponse::status)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index 30b3448a04883..907479bddff16 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java @@ -24,6 +24,7 @@ import java.util.Locale; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutPipelineAction extends BaseRestHandler { @@ -54,7 +55,7 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl Tuple sourceTuple = restRequest.contentOrSourceParam(); PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), sourceTuple.v2(), sourceTuple.v1(), ifVersion); - 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); return channel -> client.admin().cluster().putPipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 753b6f8cb710a..41796967c3870 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1455,9 +1455,6 @@ private static void validateSearchSource(SearchSourceBuilder source, boolean has if (hasScroll) { throw new IllegalArgumentException("cannot use `collapse` in a scroll context"); } - if (source.rescores() != null && source.rescores().isEmpty() == false) { - throw new IllegalArgumentException("cannot use `collapse` in conjunction with `rescore`"); - } } if (source.slice() != null) { if (source.pointInTimeBuilder() == null && (hasScroll == false)) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4939c3bc88744..f29850a306b75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -265,7 +265,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { } buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, 
TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingLong(b -> b.key)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 4ff01c5648486..7b264ccb022e5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -244,7 +244,7 @@ public InternalHistogram(StreamInput in) throws IOException { keyed = in.readBoolean(); buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingDouble(b -> b.key)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 05944b75d06d5..46b5a1b7629d8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -261,7 +261,7 @@ public InternalVariableWidthHistogram(StreamInput in) throws 
IOException { buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); targetNumBuckets = in.readVInt(); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingDouble(b -> b.centroid)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index 51901b422c861..1a793ecd80b11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -7,9 +7,13 @@ */ package org.elasticsearch.search.aggregations.bucket.range; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -130,9 +134,9 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBase { - final long[] froms, tos, maxTos; - final SortedSetDocValues values; - final LeafBucketCollector sub; + private final long[] froms, tos, maxTos; + private 
final DocCollector collector; + private final LeafBucketCollector sub; SortedSetRangeLeafCollector(SortedSetDocValues values, Range[] ranges, LeafBucketCollector sub) throws IOException { super(sub, values); @@ -141,7 +145,23 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final SortedDocValues singleton = DocValues.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.ordValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + int lo = 0; + for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { + lo = collect(doc, ord, bucket, lo); + } + } + }; + } this.sub = sub; froms = new long[ranges.length]; tos = new long[ranges.length]; // inclusive @@ -174,12 +194,7 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - int lo = 0; - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { - lo = collect(doc, ord, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, long ord, long bucket, int lowBound) throws IOException { @@ -236,10 +251,10 @@ private int collect(int doc, long ord, long bucket, int lowBound) throws IOExcep abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollectorBase { - final Range[] ranges; - final BytesRef[] maxTos; - final SortedBinaryDocValues values; - final LeafBucketCollector sub; + private final Range[] ranges; + private final BytesRef[] maxTos; + private final DocCollector collector; + private final LeafBucketCollector sub; SortedBinaryRangeLeafCollector(SortedBinaryDocValues values, Range[] 
ranges, LeafBucketCollector sub) { super(sub, values); @@ -248,7 +263,22 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.binaryValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + for (int i = 0, lo = 0; i < values.docValueCount(); ++i) { + lo = collect(doc, values.nextValue(), bucket, lo); + } + } + }; + } this.sub = sub; this.ranges = ranges; maxTos = new BytesRef[ranges.length]; @@ -266,13 +296,7 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - final int valuesCount = values.docValueCount(); - for (int i = 0, lo = 0; i < valuesCount; ++i) { - final BytesRef value = values.nextValue(); - lo = collect(doc, value, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, BytesRef value, long bucket, int lowBound) throws IOException { @@ -327,6 +351,11 @@ private int collect(int doc, BytesRef value, long bucket, int lowBound) throws I protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException; } + @FunctionalInterface + private interface DocCollector { + void collect(int doc, long bucket) throws IOException; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( diff --git a/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java index 8a385be82fb12..1966f7eaa1e69 100644 --- 
a/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java @@ -8,7 +8,10 @@ package org.elasticsearch.search.builder; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.SearchContextId; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -22,6 +25,7 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.Base64; import java.util.Objects; /** @@ -35,7 +39,7 @@ public final class PointInTimeBuilder implements Writeable, ToXContentFragment { static { PARSER = new ObjectParser<>(SearchSourceBuilder.POINT_IN_TIME.getPreferredName(), XContentParams::new); - PARSER.declareString((params, id) -> params.encodedId = id, ID_FIELD); + PARSER.declareString((params, id) -> params.encodedId = new BytesArray(Base64.getUrlDecoder().decode(id)), ID_FIELD); PARSER.declareField( (params, keepAlive) -> params.keepAlive = keepAlive, (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE_FIELD.getPreferredName()), @@ -45,32 +49,40 @@ public final class PointInTimeBuilder implements Writeable, ToXContentFragment { } private static final class XContentParams { - private String encodedId; + private BytesReference encodedId; private TimeValue keepAlive; } - private final String encodedId; + private final BytesReference encodedId; private transient SearchContextId searchContextId; // lazily decoded from the encodedId private TimeValue keepAlive; - public PointInTimeBuilder(String pitID) { + public PointInTimeBuilder(BytesReference pitID) { this.encodedId = Objects.requireNonNull(pitID, "Point in time ID must be provided"); } public PointInTimeBuilder(StreamInput in) throws IOException 
{ - encodedId = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.BINARY_PIT_ID)) { + encodedId = in.readBytesReference(); + } else { + encodedId = new BytesArray(Base64.getUrlDecoder().decode(in.readString())); + } keepAlive = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(encodedId); + if (out.getTransportVersion().onOrAfter(TransportVersions.BINARY_PIT_ID)) { + out.writeBytesReference(encodedId); + } else { + out.writeString(Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(encodedId))); + } out.writeOptionalTimeValue(keepAlive); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(ID_FIELD.getPreferredName(), encodedId); + builder.field(ID_FIELD.getPreferredName(), Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(encodedId))); if (keepAlive != null) { builder.field(KEEP_ALIVE_FIELD.getPreferredName(), keepAlive.getStringRep()); } @@ -88,7 +100,7 @@ public static PointInTimeBuilder fromXContent(XContentParser parser) throws IOEx /** * Returns the encoded id of this point in time */ - public String getEncodedId() { + public BytesReference getEncodedId() { return encodedId; } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 2023ee2e8d4b6..488c956c187d5 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -440,7 +440,7 @@ public void source(SearchSourceBuilder source) { // of shard-level search requests. However, we need to assign as a dummy PIT instead of null as we verify PIT for // slice requests on data nodes. 
source = source.shallowCopy(); - source.pointInTimeBuilder(new PointInTimeBuilder("")); + source.pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)); } this.source = source; } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index 7fd09d3ddfdf1..2286eb2e69f88 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -256,21 +256,6 @@ static CollectorManager createQueryPhaseCollectorMa searchContext.scrollContext(), searchContext.numberOfShards() ); - } else if (searchContext.collapse() != null) { - boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); - int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); - return forCollapsing( - postFilterWeight, - terminateAfterChecker, - aggsCollectorManager, - searchContext.minimumScore(), - searchContext.getProfilers() != null, - searchContext.collapse(), - searchContext.sort(), - numDocs, - trackScores, - searchContext.searchAfter() - ); } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; @@ -280,21 +265,37 @@ static CollectorManager createQueryPhaseCollectorMa numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } } - return new WithHits( - postFilterWeight, - terminateAfterChecker, - aggsCollectorManager, - searchContext.minimumScore(), - searchContext.getProfilers() != null, - reader, - query, - searchContext.sort(), - searchContext.searchAfter(), - numDocs, - searchContext.trackScores(), - searchContext.trackTotalHitsUpTo(), - hasFilterCollector - ); + if (searchContext.collapse() != null) { + boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); + 
return forCollapsing( + postFilterWeight, + terminateAfterChecker, + aggsCollectorManager, + searchContext.minimumScore(), + searchContext.getProfilers() != null, + searchContext.collapse(), + searchContext.sort(), + numDocs, + trackScores, + searchContext.searchAfter() + ); + } else { + return new WithHits( + postFilterWeight, + terminateAfterChecker, + aggsCollectorManager, + searchContext.minimumScore(), + searchContext.getProfilers() != null, + reader, + query, + searchContext.sort(), + searchContext.searchAfter(), + numDocs, + searchContext.trackScores(), + searchContext.trackTotalHitsUpTo(), + hasFilterCollector + ); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 81f079b74c18f..697aa6099ca97 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -8,13 +8,18 @@ package org.elasticsearch.search.rescore; +import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Map; /** * Rescore phase of a search request, used to run potentially expensive scoring models against the top matching documents. 
@@ -24,7 +29,7 @@ public class RescorePhase { private RescorePhase() {} public static void execute(SearchContext context) { - if (context.size() == 0 || context.collapse() != null || context.rescore() == null || context.rescore().isEmpty()) { + if (context.size() == 0 || context.rescore() == null || context.rescore().isEmpty()) { return; } @@ -32,6 +37,11 @@ public static void execute(SearchContext context) { if (topDocs.scoreDocs.length == 0) { return; } + TopFieldGroups topGroups = null; + if (topDocs instanceof TopFieldGroups topFieldGroups) { + assert context.collapse() != null; + topGroups = topFieldGroups; + } try { for (RescoreContext ctx : context.rescore()) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); @@ -39,6 +49,15 @@ public static void execute(SearchContext context) { // here we only assert that this condition is met. assert context.sort() == null && topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; } + if (topGroups != null) { + assert context.collapse() != null; + /** + * Since rescorers don't preserve collapsing, we must reconstruct the group and field + * values from the originalTopGroups to create a new {@link TopFieldGroups} from the + * rescored top documents. 
+ */ + topDocs = rewriteTopGroups(topGroups, topDocs); + } context.queryResult() .topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), context.queryResult().sortValueFormats()); } catch (IOException e) { @@ -46,6 +65,29 @@ public static void execute(SearchContext context) { } } + private static TopFieldGroups rewriteTopGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { + assert originalTopGroups.fields.length == 1 && SortField.FIELD_SCORE.equals(originalTopGroups.fields[0]) + : "rescore must always sort by score descending"; + Map docIdToGroupValue = Maps.newMapWithExpectedSize(originalTopGroups.scoreDocs.length); + for (int i = 0; i < originalTopGroups.scoreDocs.length; i++) { + docIdToGroupValue.put(originalTopGroups.scoreDocs[i].doc, originalTopGroups.groupValues[i]); + } + var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; + var newGroupValues = new Object[originalTopGroups.groupValues.length]; + int pos = 0; + for (var doc : rescoredTopDocs.scoreDocs) { + newScoreDocs[pos] = new FieldDoc(doc.doc, doc.score, new Object[] { doc.score }); + newGroupValues[pos++] = docIdToGroupValue.get(doc.doc); + } + return new TopFieldGroups( + originalTopGroups.field, + originalTopGroups.totalHits, + newScoreDocs, + originalTopGroups.fields, + newGroupValues + ); + } + /** * Returns true if the provided docs are sorted by score. 
*/ diff --git a/server/src/main/java/org/elasticsearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java b/server/src/main/java/org/elasticsearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java index ccf28b9ee9e2e..b0c00a4cc710d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java +++ b/server/src/main/java/org/elasticsearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java @@ -34,11 +34,10 @@ public UpdateIndexShardSnapshotStatusRequest(StreamInput in) throws IOException } public UpdateIndexShardSnapshotStatusRequest(Snapshot snapshot, ShardId shardId, SnapshotsInProgress.ShardSnapshotStatus status) { + super(TimeValue.MAX_VALUE); // By default, keep trying to post snapshot status messages to avoid snapshot processes getting stuck. this.snapshot = snapshot; this.shardId = shardId; this.status = status; - // By default, we keep trying to post snapshot status messages to avoid snapshot processes getting stuck. - this.masterNodeTimeout = TimeValue.timeValueNanos(Long.MAX_VALUE); } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 6060e1fed1397..06fb23ba14749 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -87,7 +87,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", "skip_unavailable", - (ns, key) -> boolSetting(key, false, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) + (ns, key) -> boolSetting(key, true, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) ); public static final Setting.AffixSetting 
REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting( diff --git a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java index bc26165432817..3f9cd42504cd5 100644 --- a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.util.CollectionUtils; @@ -17,6 +18,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; +import java.security.AccessControlException; import java.util.Arrays; /** @@ -75,20 +77,51 @@ protected void doCheckAndNotify() throws IOException { rootFileObserver.checkAndNotify(); } - private static final FileObserver[] EMPTY_DIRECTORY = new FileObserver[0]; + private static final Observer[] EMPTY_DIRECTORY = new Observer[0]; - private class FileObserver { - private final Path path; + private abstract static class Observer { + final Path path; + boolean exists; + boolean isDirectory; - private boolean exists; + private Observer(Path path) { + this.path = path; + } + + abstract void checkAndNotify() throws IOException; + + abstract void onDirectoryDeleted(); + + abstract void onFileDeleted(); + } + + /** + * A placeholder {@link Observer} for a file that we don't have permissions to access. + * We can't watch it for changes, but it shouldn't block us from watching other files in the same directory. 
+ */ + private static class DeniedObserver extends Observer { + private DeniedObserver(Path path) { + super(path); + } + + @Override + void checkAndNotify() throws IOException {} + + @Override + void onDirectoryDeleted() {} + + @Override + void onFileDeleted() {} + } + + private class FileObserver extends Observer { private long length; private long lastModified; - private boolean isDirectory; - private FileObserver[] children; + private Observer[] children; private byte[] digest; FileObserver(Path path) { - this.path = path; + super(path); } public void checkAndNotify() throws IOException { @@ -199,10 +232,16 @@ private void init(boolean initial) throws IOException { } } - private FileObserver createChild(Path file, boolean initial) throws IOException { - FileObserver child = new FileObserver(file); - child.init(initial); - return child; + private Observer createChild(Path file, boolean initial) throws IOException { + try { + FileObserver child = new FileObserver(file); + child.init(initial); + return child; + } catch (AccessControlException e) { + // don't have permissions, use a placeholder + logger.debug(() -> Strings.format("Don't have permissions to watch path [%s]", file), e); + return new DeniedObserver(file); + } } private Path[] listFiles() throws IOException { @@ -211,10 +250,10 @@ private Path[] listFiles() throws IOException { return files; } - private FileObserver[] listChildren(boolean initial) throws IOException { + private Observer[] listChildren(boolean initial) throws IOException { Path[] files = listFiles(); if (CollectionUtils.isEmpty(files) == false) { - FileObserver[] childObservers = new FileObserver[files.length]; + Observer[] childObservers = new Observer[files.length]; for (int i = 0; i < files.length; i++) { childObservers[i] = createChild(files[i], initial); } @@ -227,7 +266,7 @@ private FileObserver[] listChildren(boolean initial) throws IOException { private void updateChildren() throws IOException { Path[] files = listFiles(); if 
(CollectionUtils.isEmpty(files) == false) { - FileObserver[] newChildren = new FileObserver[files.length]; + Observer[] newChildren = new Observer[files.length]; int child = 0; int file = 0; while (file < files.length || child < children.length) { @@ -294,7 +333,7 @@ private void onFileCreated(boolean initial) { } } - private void onFileDeleted() { + void onFileDeleted() { for (FileChangesListener listener : listeners()) { try { listener.onFileDeleted(path); @@ -330,7 +369,7 @@ private void onDirectoryCreated(boolean initial) throws IOException { children = listChildren(initial); } - private void onDirectoryDeleted() { + void onDirectoryDeleted() { // First delete all children for (int child = 0; child < children.length; child++) { deleteChild(child); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index e9e2122c237c6..6098ea777d38a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -39,6 +39,7 @@ import java.util.function.Supplier; import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; /** * Test for serialization and parsing of {@link ClusterRerouteRequest} and its commands. See the superclass for, well, everything. 
@@ -202,7 +203,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep params.put("retry_failed", Boolean.toString(original.isRetryFailed())); } if (false == original.masterNodeTimeout().equals(MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { - params.put("master_timeout", original.masterNodeTimeout().toString()); + params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().toString()); } if (original.getCommands() != null) { hasBody = true; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java index 2604461d12466..2a64fbad97575 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesActionTests; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; @@ -50,27 +49,29 @@ public void tearDown() throws Exception { public void testCCSCompatibilityCheck() { Settings settings = Settings.builder() - .put("node.name", TransportFieldCapabilitiesActionTests.class.getSimpleName()) + .put("node.name", TransportResolveClusterActionTests.class.getSimpleName()) .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") .build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); + TransportVersion 
nextTransportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, VersionInformation.CURRENT, - transportVersion, + nextTransportVersion, threadPool ); ResolveClusterActionRequest request = new ResolveClusterActionRequest(new String[] { "test" }) { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getTransportVersion().before(transportVersion)) { - throw new IllegalArgumentException("This request isn't serializable before transport version " + transportVersion); - } + throw new UnsupportedOperationException( + "ResolveClusterAction requires at least Transport Version " + + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion() + + " but was " + + out.getTransportVersion().toReleaseVersion() + ); } }; ClusterService clusterService = new ClusterService( diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 8bfd61b8d5b32..70c6719c67d1b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -83,7 +85,9 @@ public OriginalIndices getOriginalIndices(int shardIndex) { @Override public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? 
TransportSearchHelper.buildScrollId(queryResults) : null; - String searchContextId = getRequest().pointInTimeBuilder() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; + BytesReference searchContextId = getRequest().pointInTimeBuilder() != null + ? new BytesArray(TransportSearchHelper.buildScrollId(queryResults)) + : null; var existing = searchResponse.getAndSet( new SearchResponse( internalSearchResponse, diff --git a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java index 0b9abb44b71de..dda977565af45 100644 --- a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.rest.FakeRestRequest; @@ -30,7 +31,7 @@ public void testMaxConcurrentSearchRequests() { verifyingClient.setExecuteVerifier(((actionType, transportRequest) -> { assertThat(transportRequest, instanceOf(OpenPointInTimeRequest.class)); transportRequests.add((OpenPointInTimeRequest) transportRequest); - return new OpenPointInTimeResponse("n/a"); + return new OpenPointInTimeResponse(new BytesArray("n/a")); })); { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 32091780484fa..32157e09e628f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.query.IdsQueryBuilder; @@ -64,7 +65,7 @@ public void testEncode() { aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); } } - final String id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); + final BytesReference id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); final SearchContextId context = SearchContextId.decode(namedWriteableRegistry, id); assertThat(context.shards().keySet(), hasSize(3)); assertThat(context.aliasFilter(), equalTo(aliasFilters)); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 9fd2cd1206ee8..d8c7d3e134571 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -13,11 +13,11 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.AbstractSearchTestCase; @@ -39,7 +39,9 
@@ import org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import static java.util.Collections.emptyMap; @@ -277,17 +279,6 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[slice] can only be used with [scroll] or [point-in-time] requests", validationErrors.validationErrors().get(0)); } - { - // collapse and rescore - SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); - searchRequest.scroll((Scroll) null); - searchRequest.source().collapse(new CollapseBuilder("field")); - searchRequest.source().addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder())); - ActionRequestValidationException validationErrors = searchRequest.validate(); - assertNotNull(validationErrors); - assertEquals(1, validationErrors.validationErrors().size()); - assertEquals("cannot use `collapse` in conjunction with `rescore`", validationErrors.validationErrors().get(0)); - } { // stored fields disabled with _source requested SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); @@ -343,7 +334,9 @@ public void testValidate() throws IOException { { // Reader context with scroll SearchRequest searchRequest = new SearchRequest().source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("id")) + new SearchSourceBuilder().pointInTimeBuilder( + new PointInTimeBuilder(new BytesArray(Base64.getUrlEncoder().encode("id".getBytes(StandardCharsets.UTF_8)))) + ) ).scroll(TimeValue.timeValueMillis(randomIntBetween(1, 100))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -517,7 +510,7 @@ public void testValidate() throws IOException { new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100)) .query(QueryBuilders.termQuery("field", 
"term")) .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null))) - .pointInTimeBuilder(new PointInTimeBuilder("test")) + .pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("test"))) ); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -550,7 +543,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest("test").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)) ); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -562,7 +555,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED) - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); @@ -570,7 +563,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().routing("route1") - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); @@ -578,7 +571,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().preference("pref1") - .source(new SearchSourceBuilder().pointInTimeBuilder(new 
PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index fea391e8205f5..a35dac8157517 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -469,7 +469,8 @@ private MockTransportService[] startTransport( int numClusters, DiscoveryNode[] nodes, Map remoteIndices, - Settings.Builder settingsBuilder + Settings.Builder settingsBuilder, + boolean skipUnavailable ) { MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; for (int i = 0; i < numClusters; i++) { @@ -486,6 +487,7 @@ private MockTransportService[] startTransport( knownNodes.add(remoteSeedNode); nodes[i] = remoteSeedNode; settingsBuilder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + settingsBuilder.put("cluster.remote.remote" + i + ".skip_unavailable", Boolean.toString(skipUnavailable)); remoteIndices.put("remote" + i, new OriginalIndices(new String[] { "index" }, IndicesOptions.lenientExpandOpen())); } return mockTransportServices; @@ -496,7 +498,8 @@ public void testCCSRemoteReduceMergeFails() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = 
startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -566,7 +569,8 @@ public void testCCSRemoteReduce() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -709,7 +713,8 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -734,10 +739,13 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { final CountDownLatch latch = new CountDownLatch(1); SetOnce>> setOnce = new SetOnce<>(); AtomicReference failure = new AtomicReference<>(); - LatchedActionListener listener = new LatchedActionListener<>( - ActionListener.wrap(r -> fail("no response expected"), failure::set), - latch - ); + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(r -> { + if (skipUnavailable) { + assertThat(r.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(numClusters)); + } else { + fail("no response expected"); // failure should be returned, not SearchResponse + } + }, failure::set), latch); TaskId parentTaskId = new TaskId("n", 1); SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); @@ -763,10 +771,14 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { resolveWithEmptySearchResponse(tuple); } awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); - assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + if (skipUnavailable) { + assertNull(failure.get()); + } else { + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } } } finally { @@ -781,7 +793,7 @@ public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); 
Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -1035,7 +1047,7 @@ public void testCollectSearchShards() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index f6f915b0e1a3d..a2b18c3328fd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -38,6 +39,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static 
org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; +import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -343,6 +345,25 @@ public void testEffectiveRetention() { } } + public void testEffectiveRetentionParams() { + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) + ); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) + ); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + } + } + @Nullable public static DataStreamLifecycle randomLifecycle() { return DataStreamLifecycle.newBuilder() diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index f60a5a5fc601a..7799c1ff5a34c 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -902,6 +902,22 @@ public void testTimeValueSerialize() throws Exception { assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length()); } + public void testTimeValueInterning() throws IOException { + try (var bytesOut = new BytesStreamOutput()) { + 
bytesOut.writeTimeValue(randomBoolean() ? TimeValue.MINUS_ONE : new TimeValue(-1, TimeUnit.MILLISECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.ZERO : new TimeValue(0, TimeUnit.MILLISECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.THIRTY_SECONDS : new TimeValue(30, TimeUnit.SECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.ONE_MINUTE : new TimeValue(1, TimeUnit.MINUTES)); + + try (var in = bytesOut.bytes().streamInput()) { + assertSame(TimeValue.MINUS_ONE, in.readTimeValue()); + assertSame(TimeValue.ZERO, in.readTimeValue()); + assertSame(TimeValue.THIRTY_SECONDS, in.readTimeValue()); + assertSame(TimeValue.ONE_MINUTE, in.readTimeValue()); + } + } + } + private static class TestStreamOutput extends BytesStream { private final BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 4e6f702b67252..977ab9bcedd75 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -113,6 +113,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; +import static org.elasticsearch.index.IndexServiceTests.closeIndexService; +import static org.elasticsearch.index.shard.IndexShardTestCase.flushAndCloseShardNoCheck; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -240,7 +242,7 @@ public void testWrapperIsBound() throws IOException { IndexService indexService = newIndexService(module); assertTrue(indexService.getReaderWrapper() instanceof Wrapper); assertSame(indexService.getEngineFactory(), module.getEngineFactory()); - indexService.close("simon says", false); + 
closeIndexService(indexService); } public void testRegisterIndexStore() throws IOException { @@ -265,7 +267,7 @@ public void testRegisterIndexStore() throws IOException { final IndexService indexService = newIndexService(module); assertThat(indexService.getDirectoryFactory(), instanceOf(FooFunction.class)); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testDirectoryWrapper() throws IOException { @@ -311,7 +313,7 @@ public void testDirectoryWrapper() throws IOException { assertThat(((WrappedDirectory) directory).shardRouting, sameInstance(shardRouting)); assertThat(directory, instanceOf(FilterDirectory.class)); - indexService.close("test done", false); + closeIndexService(indexService); } public void testOtherServiceBound() throws IOException { @@ -331,7 +333,7 @@ public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason rea assertEquals(x.getIndex(), index); indexService.getIndexEventListener().beforeIndexRemoved(null, null); assertTrue(atomicBoolean.get()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testListener() throws IOException { @@ -352,7 +354,7 @@ public void testListener() throws IOException { IndexService indexService = newIndexService(module); assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey())); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddIndexOperationListener() throws IOException { @@ -383,7 +385,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { l.preIndex(shardId, index); } assertTrue(executed.get()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddSearchOperationListener() throws IOException { @@ -409,7 +411,7 @@ public void onNewReaderContext(ReaderContext readerContext) { l.onNewReaderContext(mock(ReaderContext.class)); } assertTrue(executed.get()); 
- indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddSimilarity() throws IOException { @@ -435,7 +437,7 @@ public void testAddSimilarity() throws IOException { assertThat(similarity, Matchers.instanceOf(TestSimilarity.class)); assertEquals("my_similarity", similarityService.getSimilarity("my_similarity").name()); assertEquals("there is a key", ((TestSimilarity) similarity).key); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testFrozen() { @@ -496,7 +498,7 @@ public void testForceCustomQueryCache() throws IOException { ); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof CustomQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); assertThat(liveQueryCaches, empty()); } @@ -509,7 +511,7 @@ public void testDefaultQueryCacheImplIsSelected() throws IOException { IndexModule module = createIndexModule(indexSettings, emptyAnalysisRegistry, indexNameExpressionResolver); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof IndexQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOException { @@ -523,7 +525,7 @@ public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOExc module.forceQueryCacheProvider((a, b) -> new CustomQueryCache(null)); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof DisabledQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testCustomQueryCacheCleanedUpIfIndexServiceCreationFails() { @@ -644,7 +646,7 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { assertThat(indexService.createRecoveryState(shard, mock(DiscoveryNode.class), mock(DiscoveryNode.class)), 
is(recoveryState)); - indexService.close("closing", false); + closeIndexService(indexService); } public void testIndexCommitListenerIsBound() throws IOException, ExecutionException, InterruptedException { @@ -694,10 +696,10 @@ public void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit) { ).initialize("_node_id", null, -1); IndexService indexService = newIndexService(module); - closeables.add(() -> indexService.close("close index service at end of test", false)); + closeables.add(() -> closeIndexService(indexService)); IndexShard indexShard = indexService.createShard(shardRouting, IndexShardTestCase.NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY); - closeables.add(() -> indexShard.close("close shard at end of test", true)); + closeables.add(() -> flushAndCloseShardNoCheck(indexShard)); indexShard.markAsRecovering("test", new RecoveryState(shardRouting, DiscoveryNodeUtils.create("_node_id", "_node_id"), null)); final PlainActionFuture recoveryFuture = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index d2304908a933b..06aa88e2de4a2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -145,7 +145,7 @@ protected void runInternal() {} assertFalse(task.isClosed()); assertTrue(task.isScheduled()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse("no shards left", task.mustReschedule()); assertTrue(task.isScheduled()); task.close(); @@ -222,7 +222,7 @@ public void testRefreshTaskIsUpdated() throws Exception { assertTrue(refreshTask.isScheduled()); assertFalse(refreshTask.isClosed()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse(refreshTask.isScheduled()); assertTrue(refreshTask.isClosed()); } @@ -260,7 +260,7 @@ public void 
testFsyncTaskIsRunning() throws Exception { assertTrue(fsyncTask.isScheduled()); assertFalse(fsyncTask.isClosed()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse(fsyncTask.isScheduled()); assertTrue(fsyncTask.isClosed()); @@ -459,4 +459,8 @@ public void testUpdateSyncIntervalDynamically() { indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); } + + public static void closeIndexService(IndexService indexService) throws IOException { + indexService.close("IndexServiceTests#closeIndexService", false); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java index 0a7c4fa866008..cd8539e4c1b6a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java @@ -26,7 +26,7 @@ public void testRecoverFromNoOp() throws IOException { for (int i = 0; i < nbDocs; i++) { indexDoc(indexShard, "_doc", String.valueOf(i)); } - indexShard.close("test", true); + flushAndCloseShardNoCheck(indexShard); final ShardRouting shardRouting = indexShard.routingEntry(); IndexShard primary = reinitShard( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 982a7ed6afaa5..f574d95304c0a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -692,8 +692,8 @@ public void testFieldValueValidation() throws Exception { assertThat(doc.docs().size(), equalTo(1)); assertNull(doc.docs().get(0).get("field")); 
assertNotNull(doc.docs().get(0).getField("_ignored")); - IndexableField ignoredFields = doc.docs().get(0).getField("_ignored"); - assertThat(ignoredFields.stringValue(), equalTo("field")); + List ignoredFields = doc.docs().get(0).getFields("_ignored"); + assertTrue(ignoredFields.stream().anyMatch(field -> "field".equals(field.stringValue()))); // null inputs are ignored ParsedDocument nullDoc = defaultMapper.parse(source(b -> b.nullField("field"))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java index 7eff2dc73d76f..477f75be4c5a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java @@ -8,13 +8,17 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -22,8 +26,6 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class IgnoredFieldMapperTests extends MetadataMapperTestCase { @@ -54,9 +56,19 @@ public void testDefaults() throws IOException { ); ParsedDocument document = mapper.parse(source(b -> b.field("field", "value"))); List fields = document.rootDoc().getFields(IgnoredFieldMapper.NAME); - 
assertEquals(1, fields.size()); - assertEquals(IndexOptions.DOCS, fields.get(0).fieldType().indexOptions()); - assertTrue(fields.get(0).fieldType().stored()); + assertEquals(2, fields.size()); + IndexableField stringField = fields.stream() + .filter(field -> DocValuesType.NONE == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.DOCS, stringField.fieldType().indexOptions()); + assertEquals("field", stringField.stringValue()); + assertEquals(DocValuesType.NONE, stringField.fieldType().docValuesType()); + IndexableField docValues = fields.stream() + .filter(field -> DocValuesType.SORTED_SET == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.NONE, docValues.fieldType().indexOptions()); } public void testFetchIgnoredFieldValue() throws IOException { @@ -65,8 +77,7 @@ public void testFetchIgnoredFieldValue() throws IOException { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value"))).rootDoc()); }, iw -> { SearchLookup lookup = new SearchLookup(mapperService::fieldType, fieldDataLookup(mapperService), (ctx, doc) -> null); - SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); - when(searchExecutionContext.lookup()).thenReturn(lookup); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(mapperService); IgnoredFieldMapper.IgnoredFieldType ft = (IgnoredFieldMapper.IgnoredFieldType) mapperService.fieldType("_ignored"); ValueFetcher valueFetcher = ft.valueFetcher(searchExecutionContext, null); IndexSearcher searcher = newSearcher(iw); @@ -76,4 +87,22 @@ public void testFetchIgnoredFieldValue() throws IOException { }); } + public void testIgnoredFieldType() throws IOException { + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.FIRST_DETACHED_INDEX_VERSION, + IndexVersion.current() + ); + boolean afterIntroducingDocValues = 
version.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + boolean beforeRemovingStoredField = version.before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + MapperService mapperService = createMapperService(version, fieldMapping(b -> b.field("type", "keyword").field("ignore_above", 3))); + withLuceneIndex(mapperService, iw -> { + iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value_to_ignore"))).rootDoc()); + }, iw -> { + MappedFieldType mappedFieldType = mapperService.fieldType(IgnoredFieldMapper.NAME); + assertEquals("version = " + version, afterIntroducingDocValues, mappedFieldType.hasDocValues()); + assertEquals("version = " + version, beforeRemovingStoredField, mappedFieldType.isStored()); + }); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java index 52475e7b059b5..520fe8e5ac582 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; @@ -60,4 +61,11 @@ public void testWildcardQuery() { ); assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); } + + public void testExistsQuery() { + MappedFieldType ft = IgnoredFieldMapper.FIELD_TYPE; + + Query expected = new FieldExistsQuery(IgnoredFieldMapper.NAME); + assertEquals(expected, ft.existsQuery(MOCK_CONTEXT)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 4824bd337f5b0..e06ed1736cca2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -234,8 +234,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testNullValue() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 798f52cfbdc19..330571d53f29a 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -134,10 +134,10 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { new SourceToParse("replica", new BytesArray("{}"), XContentType.JSON) ); shards.promoteReplicaToPrimary(promotedReplica).get(); - oldPrimary.close("demoted", randomBoolean()); + closeShardNoCheck(oldPrimary, randomBoolean()); oldPrimary.store().close(); shards.removeReplica(remainingReplica); - remainingReplica.close("disconnected", false); + closeShardNoCheck(remainingReplica); remainingReplica.store().close(); // randomly introduce a conflicting document final boolean extra = randomBoolean(); @@ -260,7 +260,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { newPrimary.flush(new FlushRequest()); } - oldPrimary.close("demoted", false); + closeShardNoCheck(oldPrimary); 
oldPrimary.store().close(); IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); @@ -306,7 +306,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { shards.promoteReplicaToPrimary(newPrimary).get(); // Recover a replica should rollback the stale documents shards.removeReplica(replica); - replica.close("recover replica - first time", false); + closeShardNoCheck(replica); replica.store().close(); replica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(replica); @@ -317,7 +317,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo())); // Recover a replica again should also rollback the stale documents. shards.removeReplica(replica); - replica.close("recover replica - second time", false); + closeShardNoCheck(replica); replica.store().close(); IndexShard anotherReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(anotherReplica); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index c9f8372f54793..4577777d139cd 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -276,7 +276,7 @@ public void testFailShard() throws Exception { assertNotNull(shardPath); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); - shard.close("do not assert history", false); + closeShardNoCheck(shard); shard.store().close(); // check state file still exists ShardStateMetadata shardStateMetadata = load(logger, shardPath.getShardStatePath()); @@ -1477,7 +1477,7 @@ public 
void testSnapshotStore() throws IOException { snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); - newShard.close("test", false); + closeShardNoCheck(newShard); snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); @@ -1766,7 +1766,7 @@ public void testIndexingOperationsListeners() throws IOException { AtomicInteger preDelete = new AtomicInteger(); AtomicInteger postDelete = new AtomicInteger(); AtomicInteger postDeleteException = new AtomicInteger(); - shard.close("simon says", true); + flushAndCloseShardNoCheck(shard); shard = reinitShard(shard, new IndexingOperationListener() { @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { @@ -1848,7 +1848,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(1, postDelete.get()); assertEquals(0, postDeleteException.get()); - shard.close("Unexpected close", true); + closeShardNoCheck(shard); shard.state = IndexShardState.STARTED; // It will generate exception try { @@ -4372,7 +4372,7 @@ public void recoverFromTranslog( Thread closeShardThread = new Thread(() -> { try { safeAwait(readyToCloseLatch); - shard.close("testing", false); + closeShardNoCheck(shard); // in integration tests, this is done as a listener on IndexService. 
MockFSDirectoryFactory.checkIndex(logger, shard.store(), shard.shardId); } catch (IOException e) { @@ -4813,7 +4813,7 @@ public void testCloseShardWhileEngineIsWarming() throws Exception { recoveryThread.start(); try { warmerStarted.await(); - shard.close("testing", false); + closeShardNoCheck(shard); assertThat(shard.state, equalTo(IndexShardState.CLOSED)); } finally { warmerBlocking.countDown(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 7535f900ff2d1..4c6d6f563b950 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -363,7 +363,7 @@ public void testTranslogRecoveryWorksWithIMC() throws IOException { for (int i = 0; i < 100; i++) { indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}", XContentType.JSON, null); } - shard.close("simon says", false); + closeShardNoCheck(shard); AtomicReference shardRef = new AtomicReference<>(); Settings settings = Settings.builder().put("indices.memory.index_buffer_size", "50kb").build(); Iterable iterable = () -> (shardRef.get() == null) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index d27e924110c15..7a31d725e6340 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -372,7 +372,7 @@ public void testResetStartRequestIfTranslogIsCorrupted() throws Exception { DiscoveryNode rNode = DiscoveryNodeUtils.builder("foo").roles(Collections.emptySet()).build(); IndexShard shard = newStartedShard(false); final SeqNoStats seqNoStats = populateRandomData(shard); - 
shard.close("test", false); + closeShardNoCheck(shard); if (randomBoolean()) { shard.store().associateIndexWithNewTranslog(UUIDs.randomBase64UUID()); } else if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index e53019fd93506..ef8af18322f8d 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -22,6 +22,8 @@ import java.util.Set; import java.util.regex.Pattern; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; + public class RecoveryStatusTests extends ESSingleNodeTestCase { private static final Version MIN_SUPPORTED_LUCENE_VERSION = IndexVersions.MINIMUM_COMPATIBLE.luceneVersion(); @@ -71,7 +73,7 @@ public void testRenameTempFiles() throws IOException { } } assertNotNull(expectedFile); - indexShard.close("foo", false);// we have to close it here otherwise rename fails since the write.lock is held by the engine + closeShardNoCheck(indexShard); // we have to close it here otherwise rename fails since the write.lock is held by the engine multiFileWriter.renameAllTempFiles(); strings = Sets.newHashSet(indexShard.store().directory().listAll()); assertTrue(strings.toString(), strings.contains("foo.bar")); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 20e85c9c6fed8..9590d83c87263 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -231,7 +231,7 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { final String historyUUID = replica.getHistoryUUID(); Translog.TranslogGeneration translogGeneration = 
getTranslog(replica).getGeneration(); shards.removeReplica(replica); - replica.close("test", false); + closeShardNoCheck(replica); IndexWriterConfig iwc = new IndexWriterConfig(null).setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here @@ -355,7 +355,7 @@ public void testSequenceBasedRecoveryKeepsTranslog() throws Exception { if (randomBoolean()) { shards.flush(); } - replica.close("test", randomBoolean()); + closeShardNoCheck(replica, randomBoolean()); replica.store().close(); final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(newReplica); @@ -472,7 +472,7 @@ public void testRecoveryTrimsLocalTranslog() throws Exception { } shards.syncGlobalCheckpoint(); shards.promoteReplicaToPrimary(randomFrom(shards.getReplicas())).get(); - oldPrimary.close("demoted", false); + closeShardNoCheck(oldPrimary); oldPrimary.store().close(); oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); shards.recoverReplica(oldPrimary); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 7c0d6af4a92cf..ce732a3b95a34 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -136,7 +136,7 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { } finally { if (shard != null && shard.state() != IndexShardState.CLOSED) { try { - shard.close("test", false); + closeShardNoCheck(shard); } finally { IOUtils.close(shard.store()); } @@ -205,7 +205,7 @@ public void 
testSnapshotWithConflictingName() throws Exception { } finally { if (shard != null && shard.state() != IndexShardState.CLOSED) { try { - shard.close("test", false); + closeShardNoCheck(shard); } finally { IOUtils.close(shard.store()); } diff --git a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java index ca516a00af239..e898b852c6c39 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java @@ -9,7 +9,9 @@ package org.elasticsearch.rest; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; import java.util.HashMap; import java.util.Locale; @@ -185,4 +187,19 @@ private void assertCorsSettingRegexMatches(String settingsValue, boolean expectM ); } } + + public void testGetMasterNodeTimeout() { + assertEquals( + TimeValue.timeValueSeconds(30), + RestUtils.getMasterNodeTimeout(new FakeRestRequest.Builder(xContentRegistry()).build()) + ); + + final var timeout = randomTimeValue(); + assertEquals( + timeout, + RestUtils.getMasterNodeTimeout( + new FakeRestRequest.Builder(xContentRegistry()).withParams(Map.of("master_timeout", timeout.getStringRep())).build() + ) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java index e9de0f76df9cf..4517f9cd353c6 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java @@ -18,6 +18,7 @@ import java.util.HashMap; import java.util.Map; +import static 
org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction.resolveVotingConfigExclusionsRequest; public class RestAddVotingConfigExclusionActionTests extends ESTestCase { @@ -69,7 +70,7 @@ public void testResolveVotingConfigExclusionsRequestTimeout() { public void testResolveVotingConfigExclusionsRequestMasterTimeout() { Map params = new HashMap<>(); params.put("node_names", "node-1,node-2,node-3"); - params.put("master_timeout", "60s"); + params.put(REST_MASTER_TIMEOUT_PARAM, "60s"); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/_cluster/voting_config_exclusions") .withParams(params) @@ -84,7 +85,7 @@ public void testResolveVotingConfigExclusionsRequestTimeoutAndMasterTimeout() { Map params = new HashMap<>(); params.put("node_names", "node-1,node-2,node-3"); params.put("timeout", "60s"); - params.put("master_timeout", "120s"); + params.put(REST_MASTER_TIMEOUT_PARAM, "120s"); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/_cluster/voting_config_exclusions") .withParams(params) diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java index d77d660e8d2d6..28f1c3d999aef 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java @@ -15,6 +15,7 @@ import java.util.Map; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction.resolveVotingConfigExclusionsRequest; 
public class RestClearVotingConfigExclusionActionTests extends ESTestCase { @@ -35,7 +36,7 @@ public void testResolveRequestParameters() { final var request = resolveVotingConfigExclusionsRequest( new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.DELETE) .withPath("/_cluster/voting_config_exclusions") - .withParams(Map.of("master_timeout", "60s", "wait_for_removal", "false")) + .withParams(Map.of(REST_MASTER_TIMEOUT_PARAM, "60s", "wait_for_removal", "false")) .build() ); assertEquals(TimeValue.timeValueMinutes(1), request.masterNodeTimeout()); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java index 15f84c5e455b8..c3c1213f320bc 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.object.HasToString.hasToString; @@ -45,7 +46,7 @@ public void testFromRequest() { params.put("index", index); params.put("local", String.valueOf(local)); - params.put("master_timeout", masterTimeout.getStringRep()); + params.put(REST_MASTER_TIMEOUT_PARAM, masterTimeout.getStringRep()); params.put("timeout", timeout.getStringRep()); params.put("wait_for_status", waitForStatus.name()); if (waitForNoRelocatingShards || randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 3c81fd60fe25c..26c3f5831ec8c 100644 --- 
a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -44,6 +44,8 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -100,7 +102,6 @@ import org.elasticsearch.search.query.NonCountingTermQuery; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.TaskCancelHelper; @@ -1841,7 +1842,7 @@ public void testMinimalSearchSourceInShardRequests() { } indicesAdmin().prepareRefresh("test").get(); - String pitId = client().execute( + BytesReference pitId = client().execute( TransportOpenPointInTimeAction.TYPE, new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10)) ).actionGet().getPointInTimeId(); @@ -1863,7 +1864,7 @@ public void testMinimalSearchSourceInShardRequests() { for (ShardSearchRequest shardRequest : shardRequests) { assertNotNull(shardRequest.source()); assertNotNull(shardRequest.source().pointInTimeBuilder()); - assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo("")); + assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY)); } } @@ -2187,13 +2188,6 @@ public void testParseSourceValidation() { service ); } - { - // collapse and rescore - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - 
searchRequest.source().collapse(new CollapseBuilder("field")); - searchRequest.source().addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder())); - assertCreateContextValidation(searchRequest, "cannot use `collapse` in conjunction with `rescore`", indexService, service); - } { // stored fields disabled with _source requested SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); diff --git a/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java index 7c26ddbbbdb7e..ebf03f544fca1 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.builder; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -27,7 +28,7 @@ protected Writeable.Reader instanceReader() { @Override protected PointInTimeBuilder createTestInstance() { - final PointInTimeBuilder pointInTime = new PointInTimeBuilder(randomAlphaOfLength(20)); + final PointInTimeBuilder pointInTime = new PointInTimeBuilder(new BytesArray(randomAlphaOfLength(20))); if (randomBoolean()) { pointInTime.setKeepAlive(randomTimeValue()); } diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index c2c544c52b202..d92cc202980d2 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import 
org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; @@ -62,6 +63,8 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; @@ -436,7 +439,7 @@ public void testToXContentWithPointInTime() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); TimeValue keepAlive = randomBoolean() ? TimeValue.timeValueHours(1) : null; - searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder("id").setKeepAlive(keepAlive)); + searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("id")).setKeepAlive(keepAlive)); XContentBuilder builder = XContentFactory.contentBuilder(xContentType); searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); BytesReference bytes = BytesReference.bytes(builder); @@ -444,7 +447,10 @@ public void testToXContentWithPointInTime() throws IOException { assertEquals(1, sourceAsMap.size()); @SuppressWarnings("unchecked") Map pit = (Map) sourceAsMap.get("pit"); - assertEquals("id", pit.get("id")); + assertEquals( + new String(Base64.getUrlEncoder().encode("id".getBytes(StandardCharsets.UTF_8)), StandardCharsets.ISO_8859_1), + pit.get("id") + ); if (keepAlive != null) { assertEquals("1h", pit.get("keep_alive")); assertEquals(2, pit.size()); @@ -771,7 +777,7 @@ public void testSearchSectionsUsageCollection() throws IOException { // these are not correct runtime mappings but they are counted compared to empty object searchSourceBuilder.runtimeMappings(Collections.singletonMap("field", "keyword")); 
searchSourceBuilder.knnSearch(List.of(new KnnSearchBuilder("field", new float[] {}, 2, 5, null))); - searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder("pitid")); + searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("pitid"))); searchSourceBuilder.docValueField("field"); searchSourceBuilder.storedField("field"); searchSourceBuilder.explain(true); diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index ba1453e464c64..cbbb48711cc8d 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; @@ -97,7 +98,7 @@ private IndexSettings createIndexSettings(IndexVersion indexVersionCreated) { private ShardSearchRequest createPointInTimeRequest(int shardIndex, int numShards) { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("1m"))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("1m")))); return new ShardSearchRequest( OriginalIndices.NONE, searchRequest, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index bfd626dd3d153..bb18420276190 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ 
b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -145,6 +145,7 @@ public void testEnsureWeReconnect() throws Exception { Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) + .put("cluster.remote.test.skip_unavailable", "false") // ensureConnected is only true for skip_unavailable=false .build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 29a5d5a34e37f..9f70ab879cb25 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -1282,7 +1282,7 @@ public void testSkipUnavailable() { service.start(); service.acceptIncomingRequests(); - assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1")); if (randomBoolean()) { updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", false); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java index c61dc93f962c6..be474b4a5d530 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -68,7 +68,7 @@ public void testRemoveRemoteClusterClientRole() { public void testSkipUnavailableDefault() { final String alias = randomAlphaOfLength(8); - assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); + 
assertTrue(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); } public void testSeedsDefault() { diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index a8acabaa2914d..3aed133c590f7 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -174,7 +174,8 @@ public class BootstrapForTesting { perms, getPluginPermissions(), true, - Security.toFilePermissions(fastPathPermissions) + Security.toFilePermissions(fastPathPermissions), + List.of() ); Policy.setPolicy(new Policy() { @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java index 368de3e4d6e58..68d326be0dc83 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java @@ -139,7 +139,7 @@ public final void testOnScriptErrorContinue() throws IOException { ParsedDocument doc = mapper.parse(source(b -> b.field("message", "this is some text"))); assertThat(doc.rootDoc().getFields("message_error"), hasSize(0)); - assertThat(doc.rootDoc().getField("_ignored").stringValue(), equalTo("message_error")); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "message_error".equals(field.stringValue()))); } public final void testRejectScriptErrors() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 84b1f0a85eed2..b662e44c4b8de 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -681,7 +681,7 @@ protected void closeShard(IndexShard shard, boolean assertConsistencyBetweenTran EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber(engine); } } finally { - IOUtils.close(() -> shard.close("test", false), shard.store()); + IOUtils.close(() -> closeShardNoCheck(shard), shard.store()); } } @@ -693,6 +693,27 @@ protected void closeShards(Iterable shards) throws IOException { } } + /** + * Close an {@link IndexShard}, optionally flushing first, without performing the consistency checks that {@link #closeShard} performs. + */ + public static void closeShardNoCheck(IndexShard indexShard, boolean flushEngine) throws IOException { + indexShard.close("IndexShardTestCase#closeShardNoCheck", flushEngine); + } + + /** + * Close an {@link IndexShard} without flushing or performing the consistency checks that {@link #closeShard} performs. + */ + public static void closeShardNoCheck(IndexShard indexShard) throws IOException { + closeShardNoCheck(indexShard, false); + } + + /** + * Flush and close an {@link IndexShard}, without performing the consistency checks that {@link #closeShard} performs. 
+ */ + public static void flushAndCloseShardNoCheck(IndexShard indexShard) throws IOException { + closeShardNoCheck(indexShard, true); + } + protected void recoverShardFromStore(IndexShard primary) throws IOException { primary.markAsRecovering( "store", diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index 3dfd9af485241..b3e9c10aed886 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; @@ -44,7 +45,9 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import java.util.Map; import java.util.function.Supplier; @@ -379,7 +382,9 @@ public static SearchSourceBuilder randomSearchSourceBuilder( builder.collapse(randomCollapseBuilder.get()); } if (randomBoolean()) { - PointInTimeBuilder pit = new PointInTimeBuilder(randomAlphaOfLengthBetween(3, 10)); + PointInTimeBuilder pit = new PointInTimeBuilder( + new BytesArray(Base64.getUrlEncoder().encode(randomAlphaOfLengthBetween(3, 10).getBytes(StandardCharsets.UTF_8))) + ); if (randomBoolean()) { pit.setKeepAlive(TimeValue.timeValueMinutes(randomIntBetween(1, 60))); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java 
b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 91bee1ee253e9..2639aafae1300 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Response; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.text.Text; @@ -52,6 +53,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -201,7 +203,7 @@ public static SearchResponse parseInnerSearchResponse(XContentParser parser) thr int totalShards = -1; int skippedShards = 0; // 0 for BWC String scrollId = null; - String searchContextId = null; + BytesReference searchContextId = null; List failures = new ArrayList<>(); SearchResponse.Clusters clusters = SearchResponse.Clusters.EMPTY; for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { @@ -211,7 +213,7 @@ public static SearchResponse parseInnerSearchResponse(XContentParser parser) thr if (SearchResponse.SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) { scrollId = parser.text(); } else if (SearchResponse.POINT_IN_TIME_ID.match(currentFieldName, parser.getDeprecationHandler())) { - searchContextId = parser.text(); + searchContextId = new BytesArray(Base64.getUrlDecoder().decode(parser.text())); } else if (SearchResponse.TOOK.match(currentFieldName, parser.getDeprecationHandler())) { tookInMillis = parser.longValue(); } else if (SearchResponse.TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) { diff 
--git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 1056c766e17ca..fb6105005201f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -958,7 +958,8 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).timeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 0f3c3dd9b7263..5abca85ac0f42 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -412,7 +412,8 @@ public ClusterHealthStatus ensureGreen(String... indices) { */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... 
indices) { ClusterHealthResponse actionGet = clusterAdmin().health( - new ClusterHealthRequest(indices).timeout(timeout) + new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java index f4c9aaa619911..1ee447da1f111 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; import org.elasticsearch.xcontent.XContentLocation; @@ -20,9 +21,13 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.Predicate; +import static java.util.Collections.emptyList; + /** * Represents a section where prerequisites to run a specific test section or suite are specified. 
It is possible to specify preconditions * as a set of `skip` criteria (the test or suite will be skipped if the specified conditions are met) or `requires` criteria (the test or @@ -34,13 +39,18 @@ * - an operating system (full name, including specific Linux distributions) - some OS might show a certain behavior */ public class PrerequisiteSection { + record KnownIssue(String clusterFeature, String fixedBy) { + private static final Set FIELD_NAMES = Set.of("cluster_feature", "fixed_by"); + } + static class PrerequisiteSectionBuilder { String skipVersionRange = null; String skipReason = null; String requiresReason = null; List requiredYamlRunnerFeatures = new ArrayList<>(); List skipOperatingSystems = new ArrayList<>(); - + List skipKnownIssues = new ArrayList<>(); + String skipAwaitsFix = null; Set skipClusterFeatures = new HashSet<>(); Set requiredClusterFeatures = new HashSet<>(); @@ -53,6 +63,11 @@ enum XPackRequired { XPackRequired xpackRequired = XPackRequired.NOT_SPECIFIED; + public PrerequisiteSectionBuilder skipIfAwaitsFix(String bugUrl) { + this.skipAwaitsFix = bugUrl; + return this; + } + public PrerequisiteSectionBuilder skipIfVersion(String skipVersionRange) { this.skipVersionRange = skipVersionRange; return this; @@ -96,6 +111,11 @@ public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { return this; } + public PrerequisiteSectionBuilder skipKnownIssue(KnownIssue knownIssue) { + skipKnownIssues.add(knownIssue); + return this; + } + public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { requiredClusterFeatures.add(featureName); return this; @@ -107,29 +127,30 @@ public PrerequisiteSectionBuilder skipIfOs(String osName) { } void validate(XContentLocation contentLocation) { - if ((Strings.hasLength(skipVersionRange) == false) + if ((Strings.isEmpty(skipVersionRange)) && requiredYamlRunnerFeatures.isEmpty() && skipOperatingSystems.isEmpty() && xpackRequired == XPackRequired.NOT_SPECIFIED && 
requiredClusterFeatures.isEmpty() - && skipClusterFeatures.isEmpty()) { - throw new ParsingException( - contentLocation, - "at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section" - ); + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty() + && Strings.isEmpty(skipAwaitsFix)) { + // TODO separate the validation for requires / skip when dropping parsing of legacy fields, e.g. features in skip + throw new ParsingException(contentLocation, "at least one predicate is mandatory within a skip or requires section"); } - if (Strings.hasLength(skipVersionRange) && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); - } - if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); - } - if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + + if (Strings.isEmpty(skipReason) + && (Strings.isEmpty(skipVersionRange) + && skipOperatingSystems.isEmpty() + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty()) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within this skip section"); } - if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + + if (Strings.isEmpty(requiresReason) && (requiredClusterFeatures.isEmpty() == false)) { + throw new ParsingException(contentLocation, "reason is mandatory within this requires section"); } + // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os if 
(skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); @@ -143,33 +164,49 @@ void validate(XContentLocation contentLocation) { } public PrerequisiteSection build() { - final List> skipCriteriaList = new ArrayList<>(); - final List> requiresCriteriaList; - - // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this - // section. if (Features.areAllSupported(requiredYamlRunnerFeatures) == false) { - requiresCriteriaList = List.of(Prerequisites.FALSE); - } else { - requiresCriteriaList = new ArrayList<>(); - if (xpackRequired == XPackRequired.YES) { - requiresCriteriaList.add(Prerequisites.hasXPack()); - } - if (xpackRequired == XPackRequired.NO) { - skipCriteriaList.add(Prerequisites.hasXPack()); - } - if (Strings.hasLength(skipVersionRange)) { - skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange)); - } - if (skipOperatingSystems.isEmpty() == false) { - skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); - } - if (requiredClusterFeatures.isEmpty() == false) { - requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); - } - if (skipClusterFeatures.isEmpty() == false) { - skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); - } + // always skip this section due to missing required test runner features (see {@link Features}) + return new PrerequisiteSection( + emptyList(), + skipReason, + List.of(Prerequisites.FALSE), + requiresReason, + requiredYamlRunnerFeatures + ); + } + if (Strings.hasLength(skipAwaitsFix)) { + // always skip this section due to a pending fix + return new PrerequisiteSection( + List.of(Prerequisites.TRUE), + skipReason, + emptyList(), + requiresReason, + requiredYamlRunnerFeatures + ); + } + + final List> skipCriteriaList = new 
ArrayList<>(); + final List> requiresCriteriaList = new ArrayList<>(); + if (xpackRequired == XPackRequired.YES) { + requiresCriteriaList.add(Prerequisites.hasXPack()); + } + if (xpackRequired == XPackRequired.NO) { + skipCriteriaList.add(Prerequisites.hasXPack()); + } + if (Strings.hasLength(skipVersionRange)) { + skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange)); + } + if (skipOperatingSystems.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); + } + if (requiredClusterFeatures.isEmpty() == false) { + requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); + } + if (skipClusterFeatures.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); + } + if (skipKnownIssues.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnKnownIssue(skipKnownIssues)); } return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures); } @@ -228,97 +265,106 @@ private static void parseFeatureField(String feature, PrerequisiteSectionBuilder // package private for tests static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the skip section is not properly indented" - ); - } - String currentFieldName = null; - XContentParser.Token token; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("version".equals(currentFieldName)) { - builder.skipIfVersion(parser.text()); - } else if ("reason".equals(currentFieldName)) { - builder.setSkipReason(parser.text()); - } 
else if ("features".equals(currentFieldName)) { - parseFeatureField(parser.text(), builder); - } else if ("os".equals(currentFieldName)) { - builder.skipIfOs(parser.text()); - } else if ("cluster_features".equals(currentFieldName)) { - builder.skipIfClusterFeature(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within skip section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeatureField(parser.text(), builder); - } - } else if ("os".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfOs(parser.text()); - } - } else if ("cluster_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfClusterFeature(parser.text()); - } - } + requireStartObject("skip", parser.nextToken()); + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue; + + boolean valid = false; + if (parser.currentToken().isValue()) { + valid = switch (parser.currentName()) { + case "version" -> parseString(parser, builder::skipIfVersion); + case "reason" -> parseString(parser, builder::setSkipReason); + case "features" -> parseString(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseString(parser, builder::skipIfOs); + case "cluster_features" -> parseString(parser, builder::skipIfClusterFeature); + case "awaits_fix" -> parseString(parser, builder::skipIfAwaitsFix); + default -> false; + }; + } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + valid = switch (parser.currentName()) { + case "features" -> parseStrings(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseStrings(parser, builder::skipIfOs); + case "cluster_features" -> 
parseStrings(parser, builder::skipIfClusterFeature); + case "known_issues" -> parseArray(parser, PrerequisiteSection::parseKnownIssue, builder::skipKnownIssue); + default -> false; + }; } + if (valid == false) throwUnexpectedField("skip", parser); } parser.nextToken(); } - static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + private static void throwUnexpectedField(String section, XContentParser parser) throws IOException { + throw new ParsingException( + parser.getTokenLocation(), + Strings.format("field [%s] of type [%s] not supported within %s section", parser.currentName(), parser.currentToken(), section) + ); + } + + private static void requireStartObject(String section, XContentParser.Token token) throws IOException { + if (token != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the requires section is not properly indented" + Strings.format( + "Expected [%s], found [%s], the %s section is not properly indented", + XContentParser.Token.START_OBJECT, + token, + section + ) + ); + } + } + + private static boolean parseString(XContentParser parser, Consumer consumer) throws IOException { + consumer.accept(parser.text()); + return true; + } + + private static boolean parseStrings(XContentParser parser, Consumer consumer) throws IOException { + return parseArray(parser, XContentParser::text, consumer); + } + + private static boolean parseArray(XContentParser parser, CheckedFunction item, Consumer consumer) + throws IOException { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + consumer.accept(item.apply(parser)); + } + return true; + } + + private static KnownIssue parseKnownIssue(XContentParser parser) throws IOException { + Map fields = parser.mapStrings(); + if 
(fields.keySet().equals(KnownIssue.FIELD_NAMES) == false) { + throw new ParsingException( + parser.getTokenLocation(), + Strings.format("Expected fields %s, but got %s", KnownIssue.FIELD_NAMES, fields.keySet()) ); } - String currentFieldName = null; - XContentParser.Token token; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("reason".equals(currentFieldName)) { - builder.setRequiresReason(parser.text()); - } else if ("test_runner_features".equals(currentFieldName)) { - parseFeatureField(parser.text(), builder); - } else if ("cluster_features".equals(currentFieldName)) { - builder.requireClusterFeature(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within requires section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("test_runner_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeatureField(parser.text(), builder); - } - } else if ("cluster_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.requireClusterFeature(parser.text()); - } - } + return new KnownIssue(fields.get("cluster_feature"), fields.get("fixed_by")); + } + + static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + requireStartObject("requires", parser.nextToken()); + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue; + + boolean valid = false; + if (parser.currentToken().isValue()) { + valid = switch (parser.currentName()) { + case "reason" -> parseString(parser, builder::setRequiresReason); + case "test_runner_features" -> parseString(parser, f -> 
parseFeatureField(f, builder)); + case "cluster_features" -> parseString(parser, builder::requireClusterFeature); + default -> false; + }; + } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + valid = switch (parser.currentName()) { + case "test_runner_features" -> parseStrings(parser, f -> parseFeatureField(f, builder)); + case "cluster_features" -> parseStrings(parser, builder::requireClusterFeature); + default -> false; + }; } + if (valid == false) throwUnexpectedField("requires", parser); } parser.nextToken(); } @@ -332,9 +378,9 @@ static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuild final String requireReason; private PrerequisiteSection() { - this.skipCriteriaList = new ArrayList<>(); - this.requiresCriteriaList = new ArrayList<>(); - this.yamlRunnerFeatures = new ArrayList<>(); + this.skipCriteriaList = emptyList(); + this.requiresCriteriaList = emptyList(); + this.yamlRunnerFeatures = emptyList(); this.skipReason = null; this.requireReason = null; } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java index 8049c227b199e..ca10101a4612c 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java @@ -44,4 +44,9 @@ static Predicate requireClusterFeatures(Set skipOnClusterFeatures(Set clusterFeatures) { return context -> clusterFeatures.stream().anyMatch(context::clusterHasFeature); } + + static Predicate skipOnKnownIssue(List knownIssues) { + return context -> knownIssues.stream() + .anyMatch(i -> context.clusterHasFeature(i.clusterFeature()) && context.clusterHasFeature(i.fixedBy()) == false); + } } diff --git 
a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 2c6e7e30e0d46..108a85b978af3 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -33,7 +33,7 @@ public void testWrongIndentation() throws Exception { assertEquals("Error parsing test named [First test section]", e.getMessage()); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); assertEquals( - "Expected [START_OBJECT, found [VALUE_NULL], the skip section is not properly indented", + "Expected [START_OBJECT], found [VALUE_NULL], the skip section is not properly indented", e.getCause().getMessage() ); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 1f5bdc71dde37..f8927f76c07ec 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -468,11 +468,41 @@ public void testParseSkipOs() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true)); } + public void testMuteUsingAwaitsFix() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + "Mute": + + - skip: + awaits_fix: bugurl + + - do: + indices.get_mapping: + index: test_index + type: test_type + + - match: {test_type.properties.text.type: string} + - match: {test_type.properties.text.analyzer: whitespace} + """); + + 
ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Mute")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + } + public void testParseSkipAndRequireClusterFeatures() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ "Broken on some os": - skip: + known_issues: + - cluster_feature: buggy_feature + fixed_by: buggy_feature_fix cluster_features: [unsupported-feature1, unsupported-feature2] reason: "unsupported-features are not supported" - requires: diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java index 181ec34fefb7e..a77b2cc5b40f1 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.section.PrerequisiteSection.KnownIssue; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.yaml.YamlXContent; import org.junit.AssumptionViolatedException; @@ -34,6 +35,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static 
org.hamcrest.Matchers.oneOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -151,6 +153,11 @@ public void testSkipTestFeaturesOverridesAnySkipCriteria() { assertFalse(section.requiresCriteriaMet(mockContext)); } + public void testSkipAwaitsFix() { + PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfAwaitsFix("bugurl").build(); + assertTrue(section.skipCriteriaMet(mock(ClientYamlTestExecutionContext.class))); + } + public void testSkipOs() { PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfOs("windows95") .skipIfOs("debian-5") @@ -306,6 +313,57 @@ public void testParseSkipSectionBothFeatureAndVersion() throws Exception { assertThat(skipSectionBuilder.skipReason, equalTo("Delete ignores the parent param")); } + public void testParseSkipSectionAwaitsFix() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + awaits_fix: "bugurl" + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder.skipAwaitsFix, is("bugurl")); + } + + public void testParseSkipSectionKnownIssues() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + reason: "skip known bugs" + known_issues: + - cluster_feature: feature1 + fixed_by: featureFix1 + - cluster_feature: feature2 + fixed_by: featureFix2"""); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder.skipReason, is("skip known bugs")); + assertThat( + skipSectionBuilder.skipKnownIssues, + contains( + new KnownIssue("feature1", "featureFix1"), // + new KnownIssue("feature2", "featureFix2") + ) + ); + } + + public void testParseSkipSectionIncompleteKnownIssues() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + reason: "skip known bugs" + known_issues: + - cluster_feature: feature1"""); + + Exception e = 
expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + parser = null; // parser is not fully consumed, prevent validation + assertThat( + e.getMessage(), + is( + oneOf( + ("Expected fields [cluster_feature, fixed_by], but got [cluster_feature]"), + ("Expected fields [fixed_by, cluster_feature], but got [cluster_feature]") + ) + ) + ); + } + public void testParseSkipSectionNoReason() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ skip: @@ -313,7 +371,7 @@ public void testParseSkipSectionNoReason() throws Exception { """); Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); - assertThat(e.getMessage(), is("reason is mandatory within skip version section")); + assertThat(e.getMessage(), is("reason is mandatory within this skip section")); } public void testParseSkipSectionNoVersionNorFeature() throws Exception { @@ -323,10 +381,7 @@ public void testParseSkipSectionNoVersionNorFeature() throws Exception { """); Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); - assertThat( - e.getMessage(), - is("at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section") - ); + assertThat(e.getMessage(), is("at least one predicate is mandatory within a skip or requires section")); } public void testParseSkipSectionOsNoVersion() throws Exception { @@ -579,6 +634,31 @@ public void testSkipClusterFeaturesAllRequiredNoneToSkipMatch() { assertTrue(section.requiresCriteriaMet(mockContext)); } + public void testSkipKnownIssue() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnKnownIssue(List.of(new KnownIssue("bug1", "fix1"), new KnownIssue("bug2", "fix2")))), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + assertFalse(section.skipCriteriaMet(mockContext)); + + 
when(mockContext.clusterHasFeature("bug1")).thenReturn(true); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("fix1")).thenReturn(true); + assertFalse(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("bug2")).thenReturn(true); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("fix2")).thenReturn(true); + assertFalse(section.skipCriteriaMet(mockContext)); + } + public void evaluateEmpty() { var section = new PrerequisiteSection(List.of(), "unsupported", List.of(), "required", List.of()); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java index 223a36098d6b1..73ed4d90ce431 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java @@ -72,8 +72,8 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - TDigestState state = getExistingOrNewHistogram(bigArrays(), bucket); if (values.advanceExact(doc)) { + TDigestState state = getExistingOrNewHistogram(bigArrays(), bucket); final HistogramValue sketch = values.histogram(); while (sketch.next()) { state.add(sketch.value(), sketch.count()); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java index c0ef9b4f7374b..5499ec8fa8c02 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java +++ 
b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java @@ -42,10 +42,11 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - sums = bigArrays().grow(sums, bucket + 1); - compensations = bigArrays().grow(compensations, bucket + 1); - if (values.advanceExact(doc)) { + + sums = bigArrays().grow(sums, bucket + 1); + compensations = bigArrays().grow(compensations, bucket + 1); + final HistogramValue sketch = values.histogram(); while (sketch.next()) { double sum = sums.get(bucket); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java index 964dac08c097e..b8a4e20111365 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java @@ -70,10 +70,10 @@ public void collect(int doc, long bucket) throws IOException { return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - sums = bigArrays().grow(sums, bucket + 1); - compensations = bigArrays().grow(compensations, bucket + 1); - if (values.advanceExact(doc)) { + sums = bigArrays().grow(sums, bucket + 1); + compensations = bigArrays().grow(compensations, bucket + 1); + final int valuesCount = values.docValueCount(); // Compute the sum of double values with Kahan summation algorithm which is more // accurate than naive summation. 
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java index 8fd75dc4c2e35..2d39e47e7d1c7 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java @@ -89,18 +89,17 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - final long overSize = BigArrays.overSize(bucket + 1); - if (bucket >= count.size()) { - final long from = count.size(); - count = bigArrays().resize(count, overSize); - totalLength = bigArrays().resize(totalLength, overSize); - minLength = bigArrays().resize(minLength, overSize); - maxLength = bigArrays().resize(maxLength, overSize); - minLength.fill(from, overSize, Integer.MAX_VALUE); - maxLength.fill(from, overSize, Integer.MIN_VALUE); - } - if (values.advanceExact(doc)) { + final long overSize = BigArrays.overSize(bucket + 1); + if (bucket >= count.size()) { + final long from = count.size(); + count = bigArrays().resize(count, overSize); + totalLength = bigArrays().resize(totalLength, overSize); + minLength = bigArrays().resize(minLength, overSize); + maxLength = bigArrays().resize(maxLength, overSize); + minLength.fill(from, overSize, Integer.MAX_VALUE); + maxLength.fill(from, overSize, Integer.MIN_VALUE); + } final int valuesCount = values.docValueCount(); count.increment(bucket, valuesCount); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java index 
0f74e3466dd0a..004637a7df7f9 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java @@ -86,7 +86,7 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, docAValues) { - private static void processValues( + private void processValues( int doc, long bucket, SortedNumericDoubleValues docValues, @@ -95,6 +95,7 @@ private static void processValues( TTestStatsBuilder builder ) throws IOException { if (docValues.advanceExact(doc)) { + builder.grow(bigArrays(), bucket + 1); final int numValues = docValues.docValueCount(); for (int i = 0; i < numValues; i++) { builder.addValue(compSum, compSumOfSqr, bucket, docValues.nextValue()); @@ -105,12 +106,10 @@ private static void processValues( @Override public void collect(int doc, long bucket) throws IOException { if (bitsA == null || bitsA.get(doc)) { - a.grow(bigArrays(), bucket + 1); processValues(doc, bucket, docAValues, compSumA, compSumOfSqrA, a); } if (bitsB == null || bitsB.get(doc)) { processValues(doc, bucket, docBValues, compSumB, compSumOfSqrB, b); - b.grow(bigArrays(), bucket + 1); } } }; diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 1fafa8462c694..56f957ff488d5 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; @@ -239,7 +240,7 @@ protected SearchResponseIterator assertBlockingIterator( int numFailures, int progressStep ) throws Exception { - final String pitId; + final BytesReference pitId; final SubmitAsyncSearchRequest request; if (randomBoolean()) { OpenPointInTimeRequest openPIT = new OpenPointInTimeRequest(indexName).keepAlive(TimeValue.timeValueMinutes(between(5, 10))); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java index 7b8621f8821a6..0912da200735e 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java @@ -136,7 +136,7 @@ protected void done(boolean success) { super.done(success); final List>> listenersToExecute; synchronized (this) { - assert progress == end || success == false; + assert completed == false; completed = true; listenersToExecute = this.listeners; listeners = null; diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index bd67e71eac041..be93bcf9945eb 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -1149,13 +1149,33 @@ public CacheFile getCacheFile(KeyType cacheKey, long length) { @FunctionalInterface public 
interface RangeAvailableHandler { - // caller that wants to read from x should instead do a positional read from x + relativePos - // caller should also only read up to length, further bytes will be offered by another call to this method + /** + * Callback method used to read data from the cache. The target is typically captured by the callback implementation. + * + * A caller should only read up to length, further bytes will be offered by another call to this method + * + * @param channel is the cache region to read from + * @param channelPos a position in the channel (cache file) to read from + * @param relativePos a position in the target buffer to store bytes and pass to the caller + * @param length of the blob that can be read (must not be exceeded) + * @return number of bytes read + * @throws IOException on failure + */ int onRangeAvailable(SharedBytes.IO channel, int channelPos, int relativePos, int length) throws IOException; } @FunctionalInterface public interface RangeMissingHandler { + /** + * Callback method used to fetch data (usually from a remote storage) and write it in the cache. + * + * @param channel is the cache region to write to + * @param channelPos a position in the channel (cache file) to write to + * @param relativePos the relative position in the remote storage to read from + * @param length of data to fetch + * @param progressUpdater consumer to invoke with the number of copied bytes as they are written in cache. + * This is used to notify waiting readers that data become available in cache. 
+ */ void fillCacheRange(SharedBytes.IO channel, int channelPos, int relativePos, int length, IntConsumer progressUpdater) throws IOException; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 139c6e04c32d4..7c8ebc5a66e80 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestCcrStatsAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina if (restRequest.hasParam("timeout")) { request.setTimeout(restRequest.paramAsTime("timeout", null)); } - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( CcrStatsAction.INSTANCE, request, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java index 9e94e27f55811..1f96ea6be9dc5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction.INSTANCE; public 
class RestDeleteAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java index 86be9487417f8..e20c34fe38243 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestFollowInfoAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { final FollowInfoAction.Request request = new FollowInfoAction.Request(); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.setFollowerIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); return channel -> client.execute(FollowInfoAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java index 519888a93afdf..84a8d4f879e02 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction.INSTANCE; public class RestGetAutoFollowPatternAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(); request.setName(restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java index 6846d96a2f015..5a2ba2fe736f7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ActivateAutoFollowPatternAction.INSTANCE; public class RestPauseAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 
@@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { Request request = new Request(restRequest.param("name"), false); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java index b08fbb039cbc3..8c0f79f0b2440 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java @@ -14,6 +14,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PauseFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.PauseFollowAction.Request; @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java index dd432411014ab..cb42431022501 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction.INSTANCE; public class RestPutAutoFollowPatternAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient private static Request createRequest(RestRequest restRequest) throws IOException { try (XContentParser parser = restRequest.contentOrSourceParamParser()) { Request request = Request.fromXContent(parser, restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java index 5ceef134090a4..162431d68fb0f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PutFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.PutFollowAction.Request; @@ -43,7 +44,7 @@ private static Request createRequest(RestRequest restRequest) throws IOException final Request request = Request.fromXContent(parser); 
request.waitForActiveShards(ActiveShardCount.parseString(restRequest.param("wait_for_active_shards"))); request.setFollowerIndex(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java index a6ccf9fe1fa0b..3e51386ef1069 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ActivateAutoFollowPatternAction.INSTANCE; public class RestResumeAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { Request request = new Request(restRequest.param("name"), true); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java index d3994c487e456..86a00ca1ff020 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request; @@ -47,7 +48,7 @@ static Request createRequest(RestRequest restRequest) throws IOException { request = new Request(); request.setFollowerIndex(restRequest.param("index")); } - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java index 5936fc05cb449..acc6ffb0a67bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.UnfollowAction.INSTANCE; public class RestUnfollowAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { UnfollowAction.Request request = new UnfollowAction.Request(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new 
RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index e67372516688f..52343be3f2c23 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -409,7 +409,8 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).timeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 980710d83d52a..3a16f368d322a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -170,7 +170,7 @@ public void testAddRemoveShardOnLeader() throws Exception { if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); leaderGroup.removeReplica(closingReplica); - closingReplica.close("test", false); + closeShardNoCheck(closingReplica); closingReplica.store().close(); } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); diff --git 
a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java index 32024ff03ed15..1202f828059f6 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java @@ -29,7 +29,7 @@ public void testNodesInfoComponentVersions() { assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat( response.getNodesMap().get(server1NodeId).getComponentVersions().keySet(), - containsInAnyOrder("transform_config_version", "ml_config_version") + containsInAnyOrder("transform_config_version", "ml_config_version", "api_key_version") ); } } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index 77def0fd12459..070df2efc2629 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -232,7 +232,8 @@ provides org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber with org.elasticsearch.xpack.core.ml.MlConfigVersionComponent, - org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent; + org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent, + org.elasticsearch.xpack.core.security.action.apikey.ApiKey.VersionComponent; provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.core.XPackFeatures; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java index 044866ad07cb5..abb03e6e3037e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteLicenseAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { AcknowledgedRequest.Plain deleteLicenseRequest = new AcknowledgedRequest.Plain(); deleteLicenseRequest.ackTimeout(request.paramAsTime("timeout", deleteLicenseRequest.ackTimeout())); - deleteLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteLicenseRequest.masterNodeTimeout())); + deleteLicenseRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java index b3e436d83165d..0d60be455ff29 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPostStartBasicLicense extends BaseRestHandler { @@ -34,7 +35,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli PostStartBasicRequest startBasicRequest = new PostStartBasicRequest(); startBasicRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); startBasicRequest.ackTimeout(request.paramAsTime("timeout", startBasicRequest.ackTimeout())); - startBasicRequest.masterNodeTimeout(request.paramAsTime("master_timeout", 
startBasicRequest.masterNodeTimeout())); + startBasicRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( PostStartBasicAction.INSTANCE, startBasicRequest, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java index 413c0d5ba0732..8f954d61548e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java @@ -18,6 +18,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutLicenseAction extends BaseRestHandler { @@ -46,7 +47,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putLicenseRequest.license(request.content(), request.getXContentType()); putLicenseRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); putLicenseRequest.ackTimeout(request.paramAsTime("timeout", putLicenseRequest.ackTimeout())); - putLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putLicenseRequest.masterNodeTimeout())); + putLicenseRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (License.LicenseType.isBasic(putLicenseRequest.license().type())) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index 771071c6e1029..b12f7bf2dc06a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -77,17 +77,17 @@ public boolean 
equals(Object o) { return false; } Request that = (Request) o; - return Objects.equals(this.timeout, that.timeout) && Objects.equals(this.masterNodeTimeout, that.masterNodeTimeout); + return Objects.equals(this.timeout, that.timeout) && Objects.equals(this.masterNodeTimeout(), that.masterNodeTimeout()); } @Override public int hashCode() { - return Objects.hash(this.timeout, this.masterNodeTimeout); + return Objects.hash(this.timeout, this.masterNodeTimeout()); } @Override public String toString() { - return "CcrStatsAction.Request[timeout=" + timeout + ", masterNodeTimeout=" + masterNodeTimeout + "]"; + return "CcrStatsAction.Request[timeout=" + timeout + ", masterNodeTimeout=" + masterNodeTimeout() + "]"; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index 4bd3140753f5c..59a2cf3c936db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.rest.action; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpChannel; @@ -27,6 +26,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestXPackUsageAction extends BaseRestHandler { @@ -43,7 +43,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final TimeValue masterTimeout = request.paramAsTime("master_timeout", 
MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT); + final TimeValue masterTimeout = getMasterNodeTimeout(request); final HttpChannel httpChannel = request.getHttpChannel(); return channel -> new XPackUsageRequestBuilder(new RestCancellableNodeClient(client, httpChannel)).setMasterNodeTimeout( masterTimeout diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index 76b0634b55b39..3cb7b5b07fc1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -222,7 +222,7 @@ public boolean equals(Object o) { && Objects.equals(snapshotIndexName, that.snapshotIndexName) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) - && Objects.equals(masterNodeTimeout, that.masterNodeTimeout); + && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout()); } @Override @@ -234,7 +234,7 @@ public int hashCode() { snapshotIndexName, indexSettings, waitForCompletion, - masterNodeTimeout, + masterNodeTimeout(), storage ); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 57cf816a46072..cee63c16229e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import 
org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; @@ -81,6 +83,28 @@ public String value() { } } + public record Version(int version) implements VersionId { + @Override + public int id() { + return version; + } + } + + public static class VersionComponent implements ComponentVersionNumber { + + @Override + public String componentId() { + return "api_key_version"; + } + + @Override + public VersionId versionNumber() { + return CURRENT_API_KEY_VERSION; + } + } + + public static final ApiKey.Version CURRENT_API_KEY_VERSION = new ApiKey.Version(8_13_00_99); + private final String name; private final String id; private final Type type; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index bae2d530a21a4..ba9639d3d5156 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -6,28 +6,16 @@ */ package org.elasticsearch.xpack.core.template; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import 
org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.template.resources.TemplateResources; -import java.io.IOException; import java.util.Collections; import java.util.Map; -import java.util.function.Predicate; - -import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; /** * Handling versioned templates for time-based indices in x-pack @@ -36,28 +24,6 @@ public class TemplateUtils { private TemplateUtils() {} - /** - * Loads a JSON template as a resource and puts it into the provided map - */ - public static void loadLegacyTemplateIntoMap( - String resource, - Map map, - String templateName, - String version, - String versionProperty, - Logger logger - ) { - final String template = loadTemplate(resource, version, versionProperty); - try ( - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, template) - ) { - map.put(templateName, IndexTemplateMetadata.Builder.fromXContent(parser, templateName)); - } catch (IOException e) { - // TODO: should we handle this with a thrown exception? - logger.error("Error loading template [{}] as part of metadata upgrading", templateName); - } - } - /** * Loads a built-in template and returns its source. */ @@ -126,70 +92,4 @@ public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String te return templateMetadata.version() != null && templateMetadata.version() >= currentVersion; } - - /** - * Checks if a versioned template exists, and if it exists checks if it is up-to-date with current version. 
- * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - */ - public static boolean checkTemplateExistsAndIsUpToDate(String templateName, String versionKey, ClusterState state, Logger logger) { - - return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, Version.CURRENT::equals); - } - - /** - * Checks if template with given name exists and if it matches the version predicate given - * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - * @param predicate Predicate to execute on version check - */ - public static boolean checkTemplateExistsAndVersionMatches( - String templateName, - String versionKey, - ClusterState state, - Logger logger, - Predicate predicate - ) { - - IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); - if (templateMeta == null) { - return false; - } - CompressedXContent mappings = templateMeta.getMappings(); - - // check all mappings contain correct version in _meta - // we have to parse the source here which is annoying - if (mappings != null) { - try { - Map typeMappingMap = convertToMap(mappings.uncompressed(), false, XContentType.JSON).v2(); - // should always contain one entry with key = typename - assert (typeMappingMap.size() == 1); - String key = typeMappingMap.keySet().iterator().next(); - // get the actual mapping entries - @SuppressWarnings("unchecked") - Map mappingMap = (Map) typeMappingMap.get(key); - if (containsCorrectVersion(versionKey, mappingMap, predicate) == false) { - return false; - } - } catch (ElasticsearchParseException e) { - logger.error(() -> "Cannot parse the template [" + templateName + "]", e); - throw new IllegalStateException("Cannot parse the template " + 
templateName, e); - } - } - return true; - } - - private static boolean containsCorrectVersion(String versionKey, Map typeMappingMap, Predicate predicate) { - @SuppressWarnings("unchecked") - Map meta = (Map) typeMappingMap.get("_meta"); - if (meta == null) { - // pre 5.0, cannot be up to date - return false; - } - return predicate.test(Version.fromString((String) meta.get(versionKey))); - } } diff --git a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber index 078217faee53a..568483f03f756 100644 --- a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber +++ b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber @@ -1,2 +1,3 @@ org.elasticsearch.xpack.core.ml.MlConfigVersionComponent org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent +org.elasticsearch.xpack.core.security.action.apikey.ApiKey$VersionComponent diff --git a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json index dd69b9cecefc5..0e82cc0f2a6df 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json @@ -13,10 +13,12 @@ "index.priority": 10, "index.format": 1 }, + "version": ${idp.template.version}, "mappings": { "_doc": { "_meta": { - "idp-version": "${idp.template.version}" + "idp-version": "${idp.template.version_deprecated}", + "idp-template-version": "${idp.template.version}" }, "dynamic": "strict", "properties": { diff --git 
a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 93be79e859f8d..54fd4946df554 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -37,7 +37,6 @@ public record DownsampleShardTaskParams( String[] dimensions ) implements PersistentTaskParams { - private static final TransportVersion V_8_13_0 = TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS; public static final String NAME = DownsampleShardTask.TASK_NAME; private static final ParseField DOWNSAMPLE_CONFIG = new ParseField("downsample_config"); private static final ParseField DOWNSAMPLE_INDEX = new ParseField("rollup_index"); @@ -73,7 +72,7 @@ public record DownsampleShardTaskParams( new ShardId(in), in.readStringArray(), in.readStringArray(), - in.getTransportVersion().onOrAfter(V_8_13_0) ? in.readOptionalStringArray() : new String[] {} + in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0) ? 
in.readOptionalStringArray() : new String[] {} ); } @@ -112,7 +111,7 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); out.writeStringArray(metrics); out.writeStringArray(labels); - if (out.getTransportVersion().onOrAfter(V_8_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeOptionalStringArray(dimensions); } } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index d68f6e8d11f81..80bb0368a1afc 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -555,8 +555,13 @@ public void onFailure(Exception e) { } }); - // Downsample with retries, in case the downsampled index is not ready. 
- assertBusy(() -> downsample(sourceIndex, downsampleIndex, config), 120, TimeUnit.SECONDS); + assertBusy(() -> { + try { + client().execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)); + } catch (ElasticsearchException e) { + fail("transient failure due to overlapping downsample operations"); + } + }); // We must wait until the in-progress downsample ends, otherwise data will not be cleaned up assertBusy(() -> assertTrue("In progress downsample did not complete", downsampleListener.success), 60, TimeUnit.SECONDS); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index cce3cdeb97961..2cfa4db37ee07 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.get.GetResult; @@ -44,7 +45,7 @@ // search and multi-search hence the code repetition public class PITAwareQueryClient extends BasicQueryClient { - private String pitId; + private BytesReference pitId; private final TimeValue keepAlive; private final QueryBuilder filter; @@ -114,7 +115,10 @@ private void makeRequestPITCompatible(SearchRequest request) { } // listener handing the extraction of new PIT and closing in case of exceptions - private ActionListener pitListener(Function pitIdExtractor, ActionListener listener) { + private ActionListener 
pitListener( + Function pitIdExtractor, + ActionListener listener + ) { return wrap(r -> { // get pid pitId = pitIdExtractor.apply(r); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index afb9b590914dd..943d1275364fb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.TestCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -192,7 +194,7 @@ private List mockCriteria() { private class ESMockClient extends NoOpClient { protected final CircuitBreaker circuitBreaker; - private final String pitId = "test_pit_id"; + private final BytesReference pitId = new BytesArray("test_pit_id"); ESMockClient(ThreadPool threadPool, CircuitBreaker circuitBreaker) { super(threadPool); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index 0bdb88592ce0f..c0e5d398d6508 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -21,6 +21,8 @@ import 
org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -179,7 +181,7 @@ public void fetchHits(Iterable> refs, ActionListener lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BOOLEAN; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index 51418445713b0..aac728236b136 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public BooleanBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BOOLEAN; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index f365a2ed78610..8ae2984018640 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, Bo @Override BooleanBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override BooleanBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java new file mode 100644 index 0000000000000..f969e164eef68 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link BooleanBlock}s. + * This class is generated. Do not edit it. + */ +final class BooleanLookup implements ReleasableIterator { + private final BooleanBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private boolean first; + private int valuesInPosition; + + BooleanLookup(BooleanBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public BooleanBlock next() { + try (BooleanBlock.Builder builder = positions.blockFactory().newBooleanBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBoolean(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(BooleanBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getBoolean(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendBoolean(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendBoolean(values.getBoolean(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 70fcfeca94869..013718bb42a7d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public BooleanBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public BooleanBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 8eaf07b473a3a..c33bd12b74bbd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -116,6 +118,11 @@ public BytesRefBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BYTES_REF; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index a6c75dbc1122f..d3afcfd6dde4d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -12,6 +12,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -42,6 +44,9 @@ public sealed interface BytesRefBlock extends Block permits BytesRefArrayBlock, @Override BytesRefBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override BytesRefBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java new file mode 100644 index 0000000000000..3ec62902fe048 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link BytesRefBlock}s. + * This class is generated. Do not edit it. + */ +final class BytesRefLookup implements ReleasableIterator { + private final BytesRef firstScratch = new BytesRef(); + private final BytesRef valueScratch = new BytesRef(); + private final BytesRefBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private BytesRef first; + private int valuesInPosition; + + BytesRefLookup(BytesRefBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public BytesRefBlock next() { + try (BytesRefBlock.Builder builder = positions.blockFactory().newBytesRefBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBytesRef(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(BytesRefBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getBytesRef(i, firstScratch); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendBytesRef(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendBytesRef(values.getBytesRef(i, valueScratch)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 8c8c3b59ff758..9838fde8a0ffe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -50,6 +52,12 @@ public BytesRefBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override public BytesRefBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index d545fca4fca8d..4d923e4ca77c8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public DoubleBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.DOUBLE; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 5698f40b530b7..203856f88c4ce 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public DoubleBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.DOUBLE; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index a682c2cba019e..95f318703df62 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, Doub @Override DoubleBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override DoubleBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java new file mode 100644 index 0000000000000..bcb8a414f7c57 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link DoubleBlock}s. + * This class is generated. Do not edit it. + */ +final class DoubleLookup implements ReleasableIterator { + private final DoubleBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private double first; + private int valuesInPosition; + + DoubleLookup(DoubleBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public DoubleBlock next() { + try (DoubleBlock.Builder builder = positions.blockFactory().newDoubleBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendDouble(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(DoubleBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getDouble(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendDouble(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendDouble(values.getDouble(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index eec6675e93ae7..e76a4e0c5fdee 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public DoubleBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public DoubleBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 41c9d3b84485d..6231e8f9c5a10 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public IntBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new IntLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.INT; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index 66c0b15415418..a1e84db8c4f27 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public IntBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new IntLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.INT; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index e9d606b51c6a1..21d40170151a5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorB @Override IntBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override IntBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java new file mode 100644 index 0000000000000..b7ea15cd9d818 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link IntBlock}s. + * This class is generated. Do not edit it. + */ +final class IntLookup implements ReleasableIterator { + private final IntBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private int first; + private int valuesInPosition; + + IntLookup(IntBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public IntBlock next() { + try (IntBlock.Builder builder = positions.blockFactory().newIntBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendInt(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(IntBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getInt(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendInt(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendInt(values.getInt(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 39f8426a8da3a..70bcf6919bea6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public IntBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new IntLookup(this, positions, targetBlockSize); + } + @Override public IntBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 56370f718bae0..d8357e5d367cc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public LongBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new LongLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.LONG; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index e3b17cc7be5d4..0ccd4ab368659 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public LongBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new LongLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.LONG; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 3e1c5fcfaac95..5a11ee8e2a6e3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface LongBlock extends Block permits LongArrayBlock, LongVect @Override LongBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override LongBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java new file mode 100644 index 0000000000000..ca1b06d70b1d1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link LongBlock}s. + * This class is generated. Do not edit it. + */ +final class LongLookup implements ReleasableIterator { + private final LongBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private long first; + private int valuesInPosition; + + LongLookup(LongBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public LongBlock next() { + try (LongBlock.Builder builder = positions.blockFactory().newLongBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendLong(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(LongBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getLong(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendLong(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendLong(values.getLong(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index b573e025c0be1..b6f1e8e77505d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public LongBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new LongLookup(this, positions, targetBlockSize); + } + @Override public LongBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java index 769155db5ecfa..809c433a000a7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java @@ -59,7 +59,6 @@ */ final class PackedValuesBlockHash extends BlockHash { static final int DEFAULT_BATCH_SIZE = Math.toIntExact(ByteSizeValue.ofKb(10).getBytes()); - private static final long MAX_LOOKUP = 100_000; private final int emitBatchSize; private final BytesRefHash bytesRefHash; @@ -183,14 +182,14 @@ public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockS class LookupWork implements ReleasableIterator { private final Group[] groups; - private final long targetBytesSize; + private final long targetByteSize; private final int positionCount; private int position; - LookupWork(Page page, long targetBytesSize, int batchSize) { + LookupWork(Page page, long targetByteSize, int batchSize) { this.groups = specs.stream().map(s -> new Group(s, page, batchSize)).toArray(Group[]::new); this.positionCount = page.getPositionCount(); - this.targetBytesSize = targetBytesSize; + this.targetByteSize = targetByteSize; } @Override @@ -200,9 +199,10 @@ public boolean hasNext() { @Override public IntBlock next() { - int size = Math.toIntExact(Math.min(Integer.MAX_VALUE, targetBytesSize / Integer.BYTES / 2)); + int size = 
Math.toIntExact(Math.min(Integer.MAX_VALUE, targetByteSize / Integer.BYTES / 2)); try (IntBlock.Builder ords = blockFactory.newIntBlockBuilder(size)) { - while (position < positionCount && ords.estimatedBytes() < targetBytesSize) { + while (position < positionCount && ords.estimatedBytes() < targetByteSize) { + // TODO a test where targetByteSize is very small should still make a few rows. boolean singleEntry = startPosition(groups); if (singleEntry) { lookupSingleEntry(ords); @@ -247,7 +247,7 @@ private void lookupMultipleEntries(IntBlock.Builder ords) { } ords.appendInt(Math.toIntExact(found)); count++; - if (count > MAX_LOOKUP) { + if (count > Block.MAX_LOOKUP) { // TODO replace this with a warning and break throw new IllegalArgumentException("Found a single entry with " + count + " entries"); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 709ad4165170d..ed7ee93c99325 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -10,8 +10,10 @@ import org.apache.lucene.util.Accountable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; @@ -36,6 +38,11 @@ * the same block at the same time. */ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, RefCounted, Releasable { + /** + * The maximum number of values that can be added to one position via lookup. + * TODO maybe make this everywhere? 
+ */ + long MAX_LOOKUP = 100_000; /** * {@return an efficient dense single-value view of this block}. @@ -114,6 +121,33 @@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R */ Block filter(int... positions); + /** + * Builds an Iterator of new {@link Block}s with the same {@link #elementType} + * as this Block whose values are copied from positions in this Block. It has the + * same number of {@link #getPositionCount() positions} as the {@code positions} + * parameter. + *

+ * For example, this this block contained {@code [a, b, [b, c]]} + * and were called with the block {@code [0, 1, 1, [1, 2]]} then the + * result would be {@code [a, b, b, [b, b, c]]}. + *

+ *

+ * This process produces {@code count(this) * count(positions)} values per + * positions which could be quite quite large. Instead of returning a single + * Block, this returns an Iterator of Blocks containing all of the promised + * values. + *

+ *

+ * The returned {@link ReleasableIterator} may retain a reference to {@link Block}s + * inside the {@link Page}. Close it to release those references. + *

+ *

+ * This block is built using the same {@link BlockFactory} as was used to + * build the {@code positions} parameter. + *

+ */ + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + /** * How are multivalued fields ordered? * Some operators can enable its optimization when mv_values are sorted ascending or de-duplicated. diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index bdeb5334e0da7..1baa4d2283b25 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -11,6 +11,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import java.io.IOException; import java.util.Objects; @@ -75,6 +77,11 @@ public ConstantNullBlock filter(int... 
positions) { return (ConstantNullBlock) blockFactory().newConstantNullBlock(positions.length); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return ReleasableIterator.single((ConstantNullBlock) positions.blockFactory().newConstantNullBlock(positions.getPositionCount())); + } + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Block.class, "ConstantNullBlock", diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index f454abe7d2cfe..e5a0d934aa01a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.data; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -48,6 +50,11 @@ public Block filter(int... 
positions) { return new DocBlock(asVector().filter(positions)); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException(); + } + @Override public DocBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java index 64e3faca1f517..41ab5256e9109 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -118,6 +120,11 @@ public BytesRefBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override protected void closeInternal() { Releasables.close(ordinals, bytes); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 9b153317c8a0e..1de2fa239e61e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -11,15 +11,16 @@ $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BytesRefArray; -import org.elasticsearch.core.Releasables; - $else$ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +$endif$ +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; -$endif$ import java.io.IOException; import java.util.BitSet; @@ -132,6 +133,11 @@ $endif$ } } + @Override + public ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.$TYPE$; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index 53f0bb09640c5..66bdcc5d39fb0 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -9,7 +9,9 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.$Array$; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty } } + @Override + public ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.$TYPE$; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 331a5713fa3d1..b9d3dfc1f16ff 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -14,6 +14,8 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -58,6 +60,9 @@ $endif$ @Override $Type$Block filter(int... 
positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override $Type$Block expand(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st new file mode 100644 index 0000000000000..668752fe3f59f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +$if(BytesRef)$ +import org.apache.lucene.util.BytesRef; +$endif$ +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link $Type$Block}s. + * This class is generated. Do not edit it. 
+ */ +final class $Type$Lookup implements ReleasableIterator<$Type$Block> { +$if(BytesRef)$ + private final BytesRef firstScratch = new BytesRef(); + private final BytesRef valueScratch = new BytesRef(); +$endif$ + private final $Type$Block values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private $type$ first; + private int valuesInPosition; + + $Type$Lookup($Type$Block values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public $Type$Block next() { + try ($Type$Block.Builder builder = positions.blockFactory().new$Type$BlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.append$Type$(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy($Type$Block.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { +$if(BytesRef)$ + first = values.get$Type$(i, firstScratch); +$else$ + first = values.get$Type$(i); +$endif$ + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.append$Type$(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } +$if(BytesRef)$ + builder.append$Type$(values.get$Type$(i, valueScratch)); +$else$ + builder.append$Type$(values.get$Type$(i)); +$endif$ + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 8df5cea4c883b..274457a4d5bd8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -10,6 +10,8 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -57,6 +59,12 @@ $endif$ return vector.filter(positions).asBlock(); } + @Override + public 
ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public $Type$Block expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java new file mode 100644 index 0000000000000..4e06c1f0f4b69 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; + +/** + * {@link Block#lookup Looks up} values from a provided {@link Block} and + * mergeds them into each {@link Page}. + */ +public class ColumnLoadOperator extends AbstractPageMappingToIteratorOperator { + public record Values(String name, Block block) { + @Override + public String toString() { + return name + ":" + block.elementType(); + } + } + + /** + * Factory for {@link ColumnLoadOperator}. It's received {@link Block}s + * are never closed, so we need to build them from a non-tracking factory. 
+ */ + public record Factory(Values values, int positionsOrd) implements OperatorFactory { + @Override + public Operator get(DriverContext driverContext) { + return new ColumnLoadOperator(values, positionsOrd); + } + + @Override + public String describe() { + return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]"; + } + } + + private final Values values; + private final int positionsOrd; + + public ColumnLoadOperator(Values values, int positionsOrd) { + this.values = values; + this.positionsOrd = positionsOrd; + } + + /** + * The target size of each loaded block. + * TODO target the size more intelligently + */ + static final ByteSizeValue TARGET_BLOCK_SIZE = ByteSizeValue.ofKb(10); + + @Override + protected ReleasableIterator receive(Page page) { + // TODO tracking is complex for values + /* + * values is likely shared across many threads so tracking it is complex. + * Lookup will incRef it on the way in and decrement the ref on the way + * out but it's not really clear what the right way to get all that thread + * safe is. For now we can ignore this because we're not actually tracking + * the memory of the block. 
+ */ + return appendBlocks(page, values.block.lookup(page.getBlock(positionsOrd), TARGET_BLOCK_SIZE)); + } + + @Override + public String toString() { + return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java index 2b77003f11a4f..f821f2a37d1cf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java @@ -23,19 +23,26 @@ import java.util.List; public class HashLookupOperator extends AbstractPageMappingToIteratorOperator { + public record Key(String name, Block block) { + @Override + public String toString() { + return "{name=" + + name + + ", type=" + + block.elementType() + + ", positions=" + + block.getPositionCount() + + ", size=" + + ByteSizeValue.ofBytes(block.ramBytesUsed()) + + "}"; + } + } + /** * Factory for {@link HashLookupOperator}. It's received {@link Block}s * are never closed, so we need to build them from a non-tracking factory. 
*/ - public static class Factory implements Operator.OperatorFactory { - private final Block[] keys; - private final int[] blockMapping; - - public Factory(Block[] keys, int[] blockMapping) { - this.keys = keys; - this.blockMapping = blockMapping; - } - + public record Factory(Key[] keys, int[] blockMapping) implements Operator.OperatorFactory { @Override public Operator get(DriverContext driverContext) { return new HashLookupOperator(driverContext.blockFactory(), keys, blockMapping); @@ -43,30 +50,23 @@ public Operator get(DriverContext driverContext) { @Override public String describe() { - StringBuilder b = new StringBuilder(); - b.append("HashLookup[keys=["); - for (int k = 0; k < keys.length; k++) { - Block key = keys[k]; - if (k != 0) { - b.append(", "); - } - b.append("{type=").append(key.elementType()); - b.append(", positions=").append(key.getPositionCount()); - b.append(", size=").append(ByteSizeValue.ofBytes(key.ramBytesUsed())).append("}"); - } - b.append("], mapping=").append(Arrays.toString(blockMapping)).append("]"); - return b.toString(); + return "HashLookup[keys=" + Arrays.toString(keys) + ", mapping=" + Arrays.toString(blockMapping) + "]"; } } + private final List keys; private final BlockHash hash; private final int[] blockMapping; - public HashLookupOperator(BlockFactory blockFactory, Block[] keys, int[] blockMapping) { + public HashLookupOperator(BlockFactory blockFactory, Key[] keys, int[] blockMapping) { this.blockMapping = blockMapping; + this.keys = new ArrayList<>(keys.length); + Block[] blocks = new Block[keys.length]; List groups = new ArrayList<>(keys.length); for (int k = 0; k < keys.length; k++) { - groups.add(new BlockHash.GroupSpec(k, keys[k].elementType())); + this.keys.add(keys[k].name); + blocks[k] = keys[k].block; + groups.add(new BlockHash.GroupSpec(k, keys[k].block.elementType())); } /* * Force PackedValuesBlockHash because it assigned ordinals in order @@ -83,7 +83,7 @@ public HashLookupOperator(BlockFactory blockFactory, 
Block[] keys, int[] blockMa boolean success = false; try { final int[] lastOrd = new int[] { -1 }; - hash.add(new Page(keys), new GroupingAggregatorFunction.AddInput() { + hash.add(new Page(blocks), new GroupingAggregatorFunction.AddInput() { @Override public void add(int positionOffset, IntBlock groupIds) { // TODO support multiple rows with the same keys @@ -128,7 +128,7 @@ protected ReleasableIterator receive(Page page) { @Override public String toString() { - return "HashLookup[hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; + return "HashLookup[keys=" + keys + ", hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 805f26e9ef280..64afb14d22326 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -370,7 +370,13 @@ public void testHashLookup() { var driver = new Driver( driverContext, new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100), - List.of(new HashLookupOperator(driverContext.blockFactory(), new Block[] { primesBlock }, new int[] { 0 })), + List.of( + new HashLookupOperator( + driverContext.blockFactory(), + new HashLookupOperator.Key[] { new HashLookupOperator.Key("primes", primesBlock) }, + new int[] { 0 } + ) + ), new PageConsumerOperator(page -> { try { BlockTestUtils.readInto(actualValues, page.getBlock(0)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index ee505704f762b..6852cd52862b2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; @@ -38,6 +39,7 @@ import java.util.stream.IntStream; import java.util.stream.LongStream; +import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.containsString; @@ -47,6 +49,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -192,6 +195,11 @@ public void testIntBlock() { int pos = block.getInt(randomPosition(positionCount)); assertThat(pos, is(block.getInt(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1), List.of(2), List.of(1, 2))); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(1)) { IntBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -237,6 +245,15 @@ public void testConstantIntBlock() { assertThat(value, is(block.getInt(randomPosition(positionCount)))); assertThat(block.isNull(randomPosition(positionCount)), is(false)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + 
positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -261,6 +278,11 @@ public void testLongBlock() { int pos = (int) block.getLong(randomPosition(positionCount)); assertThat((long) pos, is(block.getLong(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1L), List.of(2L), List.of(1L, 2L))); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (LongBlock.Builder blockBuilder = blockFactory.newLongBlockBuilder(1)) { LongBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -303,6 +325,15 @@ public void testConstantLongBlock() { assertThat(value, is(block.getLong(randomPosition(positionCount)))); assertThat(block.isNull(randomPosition(positionCount)), is(false)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -328,6 +359,11 @@ public void testDoubleBlock() { int pos = (int) block.getDouble(randomPosition(positionCount)); assertThat((double) pos, is(block.getDouble(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1d), List.of(2d), List.of(1d, 2d))); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + 
assertEmptyLookup(blockFactory, block); try (DoubleBlock.Builder blockBuilder = blockFactory.newDoubleBlockBuilder(1)) { DoubleBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -371,6 +407,15 @@ public void testConstantDoubleBlock() { assertThat(value, is(block.getDouble(positionCount - 1))); assertThat(value, is(block.getDouble(randomPosition(positionCount)))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -409,6 +454,15 @@ private void testBytesRefBlock(Supplier byteArraySupplier, boolean cho assertions.accept(bytes); } assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (BytesRefBlock.Builder blockBuilder = blockFactory.newBytesRefBlockBuilder(1)) { BytesRefBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -511,6 +565,15 @@ public void testConstantBytesRefBlock() { bytes = block.getBytesRef(randomPosition(positionCount), bytes); assertThat(bytes, is(value)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -537,6 +600,15 
@@ public void testBooleanBlock() { assertThat(block.getBoolean(0), is(true)); assertThat(block.getBoolean(positionCount - 1), is((positionCount - 1) % 10 == 0)); assertSingleValueDenseBlock(block); + if (positionCount > 1) { + assertLookup( + block, + positions(blockFactory, 1, 0, new int[] { 1, 0 }), + List.of(List.of(false), List.of(true), List.of(false, true)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (BooleanBlock.Builder blockBuilder = blockFactory.newBooleanBlockBuilder(1)) { BooleanBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -577,6 +649,15 @@ public void testConstantBooleanBlock() { assertThat(block.getBoolean(positionCount - 1), is(value)); assertThat(block.getBoolean(randomPosition(positionCount)), is(value)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -1383,4 +1464,45 @@ private Block randomBigArrayBlock() { } }; } + + static IntBlock positions(BlockFactory blockFactory, Object... 
positions) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(positions.length)) { + for (Object p : positions) { + if (p instanceof int[] mv) { + builder.beginPositionEntry(); + for (int v : mv) { + builder.appendInt(v); + } + builder.endPositionEntry(); + continue; + } + if (p instanceof Integer v) { + builder.appendInt(v); + continue; + } + throw new IllegalArgumentException("invalid position: " + p + "(" + p.getClass().getName() + ")"); + } + return builder.build(); + } + } + + static void assertEmptyLookup(BlockFactory blockFactory, Block block) { + try ( + IntBlock positions = positions(blockFactory); + ReleasableIterator lookup = block.lookup(positions, ByteSizeValue.ofKb(100)) + ) { + assertThat(lookup.hasNext(), equalTo(false)); + } + } + + static void assertLookup(Block block, IntBlock positions, List> expected) { + try (positions; ReleasableIterator lookup = block.lookup(positions, ByteSizeValue.ofKb(100))) { + assertThat(lookup.hasNext(), equalTo(true)); + try (Block b = lookup.next()) { + assertThat(valuesAtPositions(b, 0, b.getPositionCount()), equalTo(expected)); + assertThat(b.blockFactory(), sameInstance(positions.blockFactory())); + } + assertThat(lookup.hasNext(), equalTo(false)); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java index 74d7e3e142d04..067cff2feba08 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java @@ -17,8 +17,13 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; +import java.util.List; import java.util.stream.IntStream; +import static java.util.Collections.singletonList; +import static org.elasticsearch.compute.data.BasicBlockTests.assertEmptyLookup; 
+import static org.elasticsearch.compute.data.BasicBlockTests.assertLookup; +import static org.elasticsearch.compute.data.BasicBlockTests.positions; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -53,6 +58,15 @@ public void testBoolean() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("BooleanBigArrayVector[positions=" + positionCount)); } @@ -84,6 +98,15 @@ public void testInt() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("IntBigArrayVector[positions=" + positionCount)); } @@ -115,6 +138,15 @@ public void testLong() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + 
assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("LongBigArrayVector[positions=" + positionCount)); } @@ -146,6 +178,15 @@ public void testDouble() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("DoubleBigArrayVector[positions=" + positionCount)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java index 1b0e61cea8135..4579eb688d95e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java @@ -17,15 +17,20 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.junit.After; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.function.IntUnaryOperator; import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class BlockMultiValuedTests extends ESTestCase { 
@ParametersFactory @@ -104,6 +109,18 @@ public void testFilteredJumbledSubsetThenExpanded() { assertFilteredThenExpanded(false, true); } + public void testLookupFromSingleOnePage() { + assertLookup(ByteSizeValue.ofMb(100), between(1, 32), p -> 1); + } + + public void testLookupFromManyOnePage() { + assertLookup(ByteSizeValue.ofMb(100), between(1, 32), p -> between(1, 5)); + } + + public void testLookupFromSingleManyPages() { + assertLookup(ByteSizeValue.ofBytes(1), between(1, 32), p -> 1); + } + private void assertFiltered(boolean all, boolean shuffled) { int positionCount = randomIntBetween(1, 16 * 1024); var b = BasicBlockTests.randomBlock(blockFactory(), elementType, positionCount, nullAllowed, 0, 10, 0, 0); @@ -212,4 +229,74 @@ public void allBreakersEmpty() throws Exception { assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); } } + + private void assertLookup(ByteSizeValue targetBytes, int positionsToCopy, IntUnaryOperator positionsPerPosition) { + BlockFactory positionsFactory = blockFactory(); + int positionCount = randomIntBetween(100, 16 * 1024); + var b = BasicBlockTests.randomBlock(blockFactory(), elementType, positionCount, nullAllowed, 0, 100, 0, 0); + try (IntBlock.Builder builder = positionsFactory.newIntBlockBuilder(positionsToCopy);) { + for (int p = 0; p < positionsToCopy; p++) { + int max = positionsPerPosition.applyAsInt(p); + switch (max) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendInt(between(0, positionCount + 100)); + default -> { + builder.beginPositionEntry(); + for (int v = 0; v < max; v++) { + builder.appendInt(between(0, positionCount + 100)); + } + builder.endPositionEntry(); + } + } + } + Block copy = null; + int positionOffset = 0; + try ( + IntBlock positions = builder.build(); + ReleasableIterator lookup = b.block().lookup(positions, targetBytes); + ) { + for (int p = 0; p < positions.getPositionCount(); p++) { + if (copy == null || p - positionOffset == copy.getPositionCount()) 
{ + if (copy != null) { + positionOffset += copy.getPositionCount(); + copy.close(); + } + assertThat(lookup.hasNext(), equalTo(true)); + copy = lookup.next(); + if (positions.getPositionCount() - positionOffset < Operator.MIN_TARGET_PAGE_SIZE) { + assertThat(copy.getPositionCount(), equalTo(positions.getPositionCount() - positionOffset)); + } else { + assertThat(copy.getPositionCount(), greaterThanOrEqualTo(Operator.MIN_TARGET_PAGE_SIZE)); + } + } + List expected = new ArrayList<>(); + int start = positions.getFirstValueIndex(p); + int end = start + positions.getValueCount(p); + for (int i = start; i < end; i++) { + int toCopy = positions.getInt(i); + if (toCopy < b.block().getPositionCount()) { + List v = BasicBlockTests.valuesAtPositions(b.block(), toCopy, toCopy + 1).get(0); + if (v != null) { + expected.addAll(v); + } + } + } + if (expected.isEmpty()) { + assertThat(copy.isNull(p - positionOffset), equalTo(true)); + } else { + assertThat(copy.isNull(p - positionOffset), equalTo(false)); + assertThat( + BasicBlockTests.valuesAtPositions(copy, p - positionOffset, p + 1 - positionOffset).get(0), + equalTo(expected) + ); + } + } + assertThat(lookup.hasNext(), equalTo(false)); + } finally { + Releasables.close(copy); + } + } finally { + b.block().close(); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index 25d79d0808741..9e0a6470e14c6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -68,7 +68,7 @@ public final void testSimpleDescription() { /** * Makes sure the description of {@link #simple} matches the {@link #expectedDescriptionOfSimple}. 
*/ - public final void testSimpleToString() { + public void testSimpleToString() { try (Operator operator = simple().get(driverContext())) { assertThat(operator.toString(), equalTo(expectedToStringOfSimple())); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java new file mode 100644 index 0000000000000..c606e4fd4c736 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class ColumnLoadOperatorTests extends OperatorTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceIntBlockSourceOperator(blockFactory, IntStream.range(0, size).map(l -> between(0, 4))); + } + + @Override + protected void assertSimpleOutput(List input, List results) { + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int loadedIdx = 0; + IntBlock keys = null; + int keysOffset = 0; + LongBlock loaded = null; + int loadedOffset = 0; + int p = 0; + while (p < count) { + if (keys == 
null) { + keys = input.get(keysIdx++).getBlock(0); + } + if (loaded == null) { + loaded = results.get(loadedIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(loaded.getValueCount(p - loadedOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int loadedStart = loaded.getFirstValueIndex(p - loadedOffset); + for (int k = keysStart, l = loadedStart; k < keysStart + valueCount; k++, l++) { + assertThat(loaded.getLong(l), equalTo(3L * keys.getInt(k))); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - loadedOffset == loaded.getPositionCount()) { + loadedOffset += loaded.getPositionCount(); + loaded = null; + } + } + } + + @Override + protected Operator.OperatorFactory simple() { + return new ColumnLoadOperator.Factory( + new ColumnLoadOperator.Values( + "values", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 0, 3, 6, 9, 12 }, 5).asBlock() + ), + 0 + ); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "ColumnLoad[values=values:LONG, positions=0]"; + } + + @Override + protected String expectedToStringOfSimple() { + return expectedDescriptionOfSimple(); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index ec69297718237..31d3764ac67fc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -7,8 +7,9 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import 
org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; @@ -25,24 +26,73 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected void assertSimpleOutput(List input, List results) { - assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(input.stream().mapToInt(Page::getPositionCount).sum())); + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int ordsIdx = 0; + LongBlock keys = null; + int keysOffset = 0; + IntBlock ords = null; + int ordsOffset = 0; + int p = 0; + while (p < count) { + if (keys == null) { + keys = input.get(keysIdx++).getBlock(0); + } + if (ords == null) { + ords = results.get(ordsIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(ords.getValueCount(p - ordsOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int ordsStart = ords.getFirstValueIndex(p - ordsOffset); + for (int k = keysStart, l = ordsStart; k < keysStart + valueCount; k++, l++) { + assertThat(ords.getInt(l), equalTo(switch ((int) keys.getLong(k)) { + case 1 -> 0; + case 7 -> 1; + case 14 -> 2; + case 20 -> 3; + default -> null; + })); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - ordsOffset == ords.getPositionCount()) { + ordsOffset += ords.getPositionCount(); + ords = null; + } + } } @Override protected Operator.OperatorFactory simple() { return new HashLookupOperator.Factory( - new Block[] { TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() }, + new HashLookupOperator.Key[] { + new HashLookupOperator.Key( + "foo", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 
14, 20 }, 4).asBlock() + ) }, new int[] { 0 } ); } @Override protected String expectedDescriptionOfSimple() { - return "HashLookup[keys=[{type=LONG, positions=3, size=96b}], mapping=[0]]"; + return "HashLookup[keys=[{name=foo, type=LONG, positions=4, size=104b}], mapping=[0]]"; } @Override protected String expectedToStringOfSimple() { - return "HashLookup[hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; + return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=4, size=544b}, mapping=[0]]"; + } + + @Override + // when you remove this AwaitsFix, also make this method in the superclass final again + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108045") + public void testSimpleToString() { + super.testSimpleToString(); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index eebcbc091d3ea..be792a0ef2612 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; @@ -112,7 +113,7 @@ public final void testSimpleCircuitBreaking() { private void runWithLimit(Operator.OperatorFactory factory, List input, ByteSizeValue limit) { BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); CircuitBreaker breaker = 
bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); + MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); DriverContext driverContext = new DriverContext(bigArrays, blockFactory); List localInput = CannedSourceOperator.deepCopyOf(blockFactory, input); boolean driverStarted = false; @@ -125,7 +126,8 @@ private void runWithLimit(Operator.OperatorFactory factory, List input, By // if drive hasn't even started then we need to release the input pages manually Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(localInput.iterator(), p -> p::releaseBlocks))); } - assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); + blockFactory.ensureAllBlocksAreReleased(); + assertThat(breaker.getUsed(), equalTo(0L)); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 93ecb003685bc..f038e9e54c9a6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -85,16 +85,16 @@ double tau() "cartesian_point to_cartesianpoint(field:cartesian_point|keyword|text)" "cartesian_shape to_cartesianshape(field:cartesian_point|cartesian_shape|keyword|text)" "date to_datetime(field:date|keyword|text|double|long|unsigned_long|integer)" -"double to_dbl(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"double to_dbl(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long)" "double to_degrees(number:double|integer|long|unsigned_long)" -"double to_double(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"double to_double(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long)" "date 
to_dt(field:date|keyword|text|double|long|unsigned_long|integer)" "geo_point to_geopoint(field:geo_point|keyword|text)" "geo_shape to_geoshape(field:geo_point|geo_shape|keyword|text)" -"integer to_int(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" -"integer to_integer(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"integer to_int(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer)" +"integer to_integer(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer)" "ip to_ip(field:ip|keyword|text)" -"long to_long(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"long to_long(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long)" "keyword|text to_lower(str:keyword|text)" "double to_radians(number:double|integer|long|unsigned_long)" "keyword to_str(field:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" @@ -198,16 +198,16 @@ to_boolean |field |"boolean|keyword|text|double to_cartesianpo|field |"cartesian_point|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_cartesiansh|field |"cartesian_point|cartesian_shape|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_datetime |field |"date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. -to_dbl |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_dbl |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. to_degrees |number |"double|integer|long|unsigned_long" |Input value. 
The input can be a single- or multi-valued column or an expression. -to_double |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_double |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. to_dt |field |"date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_geopoint |field |"geo_point|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_geoshape |field |"geo_point|geo_shape|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. -to_int |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. -to_integer |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_int |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_integer |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer" |Input value. The input can be a single- or multi-valued column or an expression. to_ip |field |"ip|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. -to_long |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_long |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. 
to_lower |str |"keyword|text" |String expression. If `null`, the function returns `null`. to_radians |number |"double|integer|long|unsigned_long" |Input value. The input can be a single- or multi-valued column or an expression. to_str |field |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 70d6fd6b6d097..3aaace930eed7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -694,8 +694,7 @@ ca:l|gender:s 0 |null ; - -countFieldVsAll#[skip:-8.14.99, reason:Fixed count(null) in 8.15] +countFieldVsAll#[skip:-8.13.99, reason:Fixed count(null) in 8.14] from employees | stats ca = count(), cn = count(null), cf = count(gender) by gender | sort gender; ca:l|cn:l|cf:l|gender:s @@ -1456,172 +1455,172 @@ rows:l 6 ; -countOfConst#[skip:-8.14.99,reason:supported in 8.15] +countOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = count(1), s2point1 = count(2.1), s_mv = count([-1, 0, 3]) * 3, s_null = count(null), rows = count(*) +| STATS s1 = count(1), s2point1 = count(2.1), s_mv = count([-1, 0, 3]) * 3, s_null = count(null), s_expr = count(1+1), s_expr_null = count(1+null), rows = count(*) ; -s1:l | s2point1:l | s_mv:l | s_null:l | rows:l -100 | 100 | 900 | 0 | 100 +s1:l | s2point1:l | s_mv:l | s_null:l | s_expr:l | s_expr_null:l | rows:l +100 | 100 | 900 | 0 | 100 | 0 | 100 ; -countOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +countOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = count("two point one"), s_mv = count([-1, 0, 3]), s_null = count(null), rows = count(*) by languages +| STATS 
s2point1 = count("two point one"), s_mv = count([-1, 0, 3]), s_null = count(null), s_expr = count(1+1), s_expr_null = count(1+null), rows = count(*) by languages | SORT languages ; -s2point1:l | s_mv:l | s_null:l | rows:l | languages:i -15 | 45 | 0 | 15 | 1 -19 | 57 | 0 | 19 | 2 -17 | 51 | 0 | 17 | 3 -18 | 54 | 0 | 18 | 4 -21 | 63 | 0 | 21 | 5 -10 | 30 | 0 | 10 | null +s2point1:l | s_mv:l | s_null:l | s_expr:l | s_expr_null:l | rows:l | languages:i +15 | 45 | 0 | 15 | 0 | 15 | 1 +19 | 57 | 0 | 19 | 0 | 19 | 2 +17 | 51 | 0 | 17 | 0 | 17 | 3 +18 | 54 | 0 | 18 | 0 | 18 | 4 +21 | 63 | 0 | 21 | 0 | 21 | 5 +10 | 30 | 0 | 10 | 0 | 10 | null ; sumOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = sum(1), s2point1 = sum(2.1), s_mv = sum([-1, 0, 3]) * 3, s_null = sum(null), rows = count(*) +| STATS s1 = sum(1), s2point1 = sum(2.1), s_mv = sum([-1, 0, 3]) * 3, s_null = sum(null), s_expr = sum(1+1), s_expr_null = sum(1+null), rows = count(*) ; -s1:l | s2point1:d | s_mv:l | s_null:d | rows:l -100 | 210.0 | 600 | null | 100 +s1:l | s2point1:d | s_mv:l | s_null:d | s_expr:l | s_expr_null:l | rows:l +100 | 210.0 | 600 | null | 200 | null | 100 ; sumOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = round(sum(2.1), 1), s_mv = sum([-1, 0, 3]), rows = count(*) by languages +| STATS s2point1 = round(sum(2.1), 1), s_mv = sum([-1, 0, 3]), s_expr = sum(1+1), s_expr_null = sum(1+null), rows = count(*) by languages | SORT languages ; -s2point1:d | s_mv:l | rows:l | languages:i -31.5 | 30 | 15 | 1 -39.9 | 38 | 19 | 2 -35.7 | 34 | 17 | 3 -37.8 | 36 | 18 | 4 -44.1 | 42 | 21 | 5 -21.0 | 20 | 10 | null +s2point1:d | s_mv:l | s_expr:l | s_expr_null:l | rows:l | languages:i +31.5 | 30 | 30 | null | 15 | 1 +39.9 | 38 | 38 | null | 19 | 2 +35.7 | 34 | 34 | null | 17 | 3 +37.8 | 36 | 36 | null | 18 | 4 +44.1 | 42 | 42 | null | 21 | 5 +21.0 | 20 | 20 | null | 10 | null ; avgOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM 
employees -| STATS s1 = avg(1), s_mv = avg([-1, 0, 3]) * 3, s_null = avg(null) +| STATS s1 = avg(1), s_mv = avg([-1, 0, 3]) * 3, s_null = avg(null), s_expr = avg(1+1), s_expr_null = avg(1+null) ; -s1:d | s_mv:d | s_null:d -1.0 | 2.0 | null +s1:d | s_mv:d | s_null:d | s_expr:d | s_expr_null:d +1.0 | 2.0 | null | 2.0 | null ; avgOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = avg(2.1), s_mv = avg([-1, 0, 3]) * 3 by languages +| STATS s2point1 = avg(2.1), s_mv = avg([-1, 0, 3]) * 3, s_expr = avg(1+1), s_expr_null = avg(1+null) by languages | SORT languages ; -s2point1:d | s_mv:d | languages:i -2.1 | 2.0 | 1 -2.1 | 2.0 | 2 -2.1 | 2.0 | 3 -2.1 | 2.0 | 4 -2.1 | 2.0 | 5 -2.1 | 2.0 | null +s2point1:d | s_mv:d | s_expr:d | s_expr_null:d | languages:i +2.1 | 2.0 | 2.0 | null | 1 +2.1 | 2.0 | 2.0 | null | 2 +2.1 | 2.0 | 2.0 | null | 3 +2.1 | 2.0 | 2.0 | null | 4 +2.1 | 2.0 | 2.0 | null | 5 +2.1 | 2.0 | 2.0 | null | null ; minOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = min(1), s_mv = min([-1, 0, 3]), s_null = min(null) +| STATS s1 = min(1), s_mv = min([-1, 0, 3]), s_null = min(null), s_expr = min(1+1), s_expr_null = min(1+null) ; -s1:i | s_mv:i | s_null:null -1 | -1 | null +s1:i | s_mv:i | s_null:null | s_expr:i | s_expr_null:i +1 | -1 | null | 2 | null ; minOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = min(2.1), s_mv = min([-1, 0, 3]) by languages +| STATS s2point1 = min(2.1), s_mv = min([-1, 0, 3]), s_expr = min(1+1), s_expr_null = min(1+null) by languages | SORT languages ; -s2point1:d | s_mv:i | languages:i -2.1 | -1 | 1 -2.1 | -1 | 2 -2.1 | -1 | 3 -2.1 | -1 | 4 -2.1 | -1 | 5 -2.1 | -1 | null +s2point1:d | s_mv:i | s_expr:i | s_expr_null:i | languages:i +2.1 | -1 | 2 | null | 1 +2.1 | -1 | 2 | null | 2 +2.1 | -1 | 2 | null | 3 +2.1 | -1 | 2 | null | 4 +2.1 | -1 | 2 | null | 5 +2.1 | -1 | 2 | null | null ; maxOfConst#[skip:-8.13.99,reason:supported in 
8.14] FROM employees -| STATS s1 = max(1), s_mv = max([-1, 0, 3]), s_null = max(null) +| STATS s1 = max(1), s_mv = max([-1, 0, 3]), s_null = max(null), s_expr = max(1+1), s_expr_null = max(1+null) ; -s1:i | s_mv:i | s_null:null -1 | 3 | null +s1:i | s_mv:i | s_null:null | s_expr:i | s_expr_null:i +1 | 3 | null | 2 | null ; maxOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = max(2.1), s_mv = max([-1, 0, 3]) by languages +| STATS s2point1 = max(2.1), s_mv = max([-1, 0, 3]), s_expr = max(1+1), s_expr_null = max(1+null) by languages | SORT languages ; -s2point1:d | s_mv:i | languages:i -2.1 | 3 | 1 -2.1 | 3 | 2 -2.1 | 3 | 3 -2.1 | 3 | 4 -2.1 | 3 | 5 -2.1 | 3 | null +s2point1:d | s_mv:i | s_expr:i | s_expr_null:i | languages:i +2.1 | 3 | 2 | null | 1 +2.1 | 3 | 2 | null | 2 +2.1 | 3 | 2 | null | 3 +2.1 | 3 | 2 | null | 4 +2.1 | 3 | 2 | null | 5 +2.1 | 3 | 2 | null | null ; -medianOfConst#[skip:-8.14.99,reason:supported in 8.15] +medianOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = median(1), s_mv = median([-1, 0, 1, 3]), s_null = median(null) +| STATS s1 = median(1), s_mv = median([-1, 0, 1, 3]), s_null = median(null), s_expr = median(1+1), s_expr_null = median(1+null) ; -s1:d | s_mv:d | s_null:d -1.0 | 0.5 | null +s1:d | s_mv:d | s_null:d | s_expr:d | s_expr_null:d +1.0 | 0.5 | null | 2.0 | null ; -medianOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +medianOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = median(2.1), s_mv = median([-1, 0, 1, 3]) by languages +| STATS s2point1 = median(2.1), s_mv = median([-1, 0, 1, 3]), s_expr = median(1+1), s_expr_null = median(1+null) by languages | SORT languages ; -s2point1:d | s_mv:d | languages:i -2.1 | 0.5 | 1 -2.1 | 0.5 | 2 -2.1 | 0.5 | 3 -2.1 | 0.5 | 4 -2.1 | 0.5 | 5 -2.1 | 0.5 | null +s2point1:d | s_mv:d | s_expr:d | s_expr_null:d | languages:i +2.1 | 0.5 | 2.0 | null | 1 +2.1 | 0.5 | 2.0 | null | 2 
+2.1 | 0.5 | 2.0 | null | 3 +2.1 | 0.5 | 2.0 | null | 4 +2.1 | 0.5 | 2.0 | null | 5 +2.1 | 0.5 | 2.0 | null | null ; -countDistinctOfConst#[skip:-8.14.99,reason:supported in 8.15] +countDistinctOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = count_distinct(1), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_null = count_distinct(null), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 5) +| STATS s1 = count_distinct(1), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_null = count_distinct(null), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 5), s_expr = count_distinct(1+1), s_expr_null = count_distinct(1+null) ; -s1:l | s_mv:l | s_null:l | s_param:l -1 | 4 | 0 | 4 +s1:l | s_mv:l | s_null:l | s_param:l | s_expr:l | s_expr_null:l +1 | 4 | 0 | 4 | 1 | 0 ; -countDistinctOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +countDistinctOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = count_distinct("two point one"), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 8000) by languages +| STATS s2point1 = count_distinct("two point one"), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 8000), s_expr = count_distinct(1+1), s_expr_null = count_distinct(1+null) by languages | SORT languages ; -s2point1:l | s_mv:l | s_param:l | languages:i -1 | 4 | 4 | 1 -1 | 4 | 4 | 2 -1 | 4 | 4 | 3 -1 | 4 | 4 | 4 -1 | 4 | 4 | 5 -1 | 4 | 4 | null +s2point1:l | s_mv:l | s_param:l | s_expr:l | s_expr_null:l | languages:i +1 | 4 | 4 | 1 | 0 | 1 +1 | 4 | 4 | 1 | 0 | 2 +1 | 4 | 4 | 1 | 0 | 3 +1 | 4 | 4 | 1 | 0 | 4 +1 | 4 | 4 | 1 | 0 | 5 +1 | 4 | 4 | 1 | 0 | null ; evalOverridingKey#[skip:-8.13.1,reason:fixed in 8.13.2] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json new file mode 100644 index 0000000000000..dd4073d5dc7cf --- 
/dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json @@ -0,0 +1,34 @@ +{ + "properties": { + "@timestamp": { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "network": { + "properties": { + "connections": { + "type": "long", + "time_series_metric": "gauge" + }, + "bytes_in": { + "type": "long", + "time_series_metric": "counter" + }, + "bytes_out": { + "type": "long", + "time_series_metric": "counter" + }, + "message_in": { + "type": "float", + "time_series_metric": "counter" + } + } + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java index 5488efda7834f..7e54bf94ac263 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java @@ -61,21 +61,21 @@ protected abstract XContentBuilder valueToXContent(XContentBuilder builder, ToXC public static PositionToXContent positionToXContent(ColumnInfo columnInfo, Block block, BytesRef scratch) { return switch (columnInfo.type()) { - case "long" -> new PositionToXContent(block) { + case "long", "counter_long" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((LongBlock) block).getLong(valueIndex)); } }; - case "integer" -> new PositionToXContent(block) { + case "integer", "counter_integer" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((IntBlock) block).getInt(valueIndex)); } }; - case "double" -> new 
PositionToXContent(block) { + case "double", "counter_double" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index f467512fd6c0b..ba9aafe03143f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -123,9 +123,9 @@ static Object valueAtPosition(Block block, int position, String dataType, BytesR private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { return switch (dataType) { case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); - case "long" -> ((LongBlock) block).getLong(offset); - case "integer" -> ((IntBlock) block).getInt(offset); - case "double" -> ((DoubleBlock) block).getDouble(offset); + case "long", "counter_long" -> ((LongBlock) block).getLong(offset); + case "integer", "counter_integer" -> ((IntBlock) block).getInt(offset); + case "double", "counter_double" -> ((DoubleBlock) block).getDouble(offset); case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); case "ip" -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); @@ -174,9 +174,9 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong( longToUnsignedLong(((Number) value).longValue(), true) ); - case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); - case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); - case "double" -> ((DoubleBlock.Builder) 
builder).appendDouble(((Number) value).doubleValue()); + case "long", "counter_long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); + case "integer", "counter_integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); + case "double", "counter_double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( new BytesRef(value.toString()) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 2267125304da7..b318e7ed99bc0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.ql.expression.AttributeSet; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; +import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; @@ -193,6 +194,9 @@ private static void checkAggregate(LogicalPlan p, Set failures) { if (attr != null) { groupRefs.add(attr); } + if (e instanceof FieldAttribute f && EsqlDataTypes.isCounterType(f.dataType())) { + failures.add(fail(e, "cannot group by on [{}] type for grouping [{}]", f.dataType().typeName(), e.sourceText())); + } }); // check aggregates - accept only aggregate functions or expressions over grouping diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 
c26f722d9f765..096dcc183eaf4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEqualsMapper; import org.elasticsearch.xpack.esql.planner.Layout; @@ -40,12 +39,6 @@ public final class EvalMapper { private static final List> MAPPERS = List.of( - ComparisonMapper.EQUALS, - ComparisonMapper.NOT_EQUALS, - ComparisonMapper.GREATER_THAN, - ComparisonMapper.GREATER_THAN_OR_EQUAL, - ComparisonMapper.LESS_THAN, - ComparisonMapper.LESS_THAN_OR_EQUAL, InMapper.IN_MAPPER, new InsensitiveEqualsMapper(), new BooleanLogic(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java index e536547e006fd..7a084649ac4fa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java @@ -15,6 +15,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.ql.expression.Expression; import java.util.function.Function; @@ -27,6 +28,22 @@ */ public interface EvaluatorMapper { 
/** + *

+ * Note for implementors: + * If you are implementing this function, you should call the passed-in + * lambda on your children, after doing any other manipulation (casting, + * etc.) necessary. + *

+ *

+ * Note for Callers: + * If you are attempting to call this method, and you have an + * {@link Expression} and a {@link org.elasticsearch.xpack.esql.planner.Layout}, + * you likely want to call {@link org.elasticsearch.xpack.esql.evaluator.EvalMapper#toEvaluator(Expression, Layout)} + * instead. On the other hand, if you already have something that + * looks like the parameter for this method, you should call this method + * with that function. + *

+ *

* Build an {@link ExpressionEvaluator.Factory} for the tree of * expressions rooted at this node. This is only guaranteed to return * a sensible evaluator if this node has a valid type. If this node @@ -35,6 +52,7 @@ public interface EvaluatorMapper { * If {@linkplain Expression#typeResolved} returns an error then * this method may throw. Or return an evaluator that produces * garbage. Or return an evaluator that throws when run. + *

*/ ExpressionEvaluator.Factory toEvaluator(Function toEvaluator); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java deleted file mode 100644 index 85b30032c1070..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; - -import org.elasticsearch.common.TriFunction; -import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; -import org.elasticsearch.xpack.esql.planner.Layout; -import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; - -import static org.elasticsearch.xpack.esql.evaluator.EvalMapper.toEvaluator; - -public abstract class ComparisonMapper extends ExpressionMapper { - public static final ExpressionMapper EQUALS = new ComparisonMapper( - 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new EqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper NOT_EQUALS = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new NotEqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper GREATER_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper GREATER_THAN_OR_EQUAL = new ComparisonMapper( - 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN_OR_EQUAL = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - private final TriFunction ints; - private final TriFunction longs; - private final TriFunction doubles; - private final TriFunction keywords; - private final TriFunction bools; - private final EvaluatorFunctionWithType geometries; - - @FunctionalInterface - private interface EvaluatorFunctionWithType { - ExpressionEvaluator.Factory apply(Source s, ExpressionEvaluator.Factory t, ExpressionEvaluator.Factory u, T dataType); - } - - 
private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools, - EvaluatorFunctionWithType geometries - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = geometries; - } - - private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = (source, lhs, rhs) -> { throw EsqlIllegalArgumentException.illegalDataType(DataTypes.BOOLEAN); }; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - @Override - public final ExpressionEvaluator.Factory map(BinaryComparison bc, Layout layout) { - DataType leftType = bc.left().dataType(); - if (leftType.isNumeric()) { - DataType type = EsqlDataTypeRegistry.INSTANCE.commonType(leftType, bc.right().dataType()); - if (type == DataTypes.INTEGER) { - return castToEvaluator(bc, layout, DataTypes.INTEGER, ints); - } - if (type == DataTypes.LONG) { - return castToEvaluator(bc, layout, DataTypes.LONG, longs); - } - if (type == DataTypes.DOUBLE) { - return castToEvaluator(bc, layout, DataTypes.DOUBLE, doubles); - } - if (type == DataTypes.UNSIGNED_LONG) { - // using the long comparators will work on UL as well - return castToEvaluator(bc, layout, DataTypes.UNSIGNED_LONG, longs); - } - } - var leftEval = toEvaluator(bc.left(), layout); - var rightEval = toEvaluator(bc.right(), 
layout); - if (leftType == DataTypes.KEYWORD || leftType == DataTypes.TEXT || leftType == DataTypes.IP || leftType == DataTypes.VERSION) { - return keywords.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.BOOLEAN) { - return bools.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.DATETIME) { - return longs.apply(bc.source(), leftEval, rightEval); - } - if (EsqlDataTypes.isSpatial(leftType)) { - return geometries.apply(bc.source(), leftEval, rightEval, leftType); - } - throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); - } - - public static ExpressionEvaluator.Factory castToEvaluator( - BinaryOperator op, - Layout layout, - DataType required, - TriFunction factory - ) { - var lhs = Cast.cast(op.source(), op.left().dataType(), required, toEvaluator(op.left(), layout)); - var rhs = Cast.cast(op.source(), op.right().dataType(), required, toEvaluator(op.right(), layout)); - return factory.apply(op.source(), lhs, rhs); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java index 62eec13af008a..9cc10a555f288 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java @@ -12,8 +12,6 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.Negatable; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; import 
org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -22,7 +20,7 @@ import java.time.ZoneId; import java.util.Map; -public class Equals extends EsqlBinaryComparison implements Negatable { +public class Equals extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.BOOLEAN, EqualsBoolsEvaluator.Factory::new), Map.entry(DataTypes.INTEGER, EqualsIntsEvaluator.Factory::new), @@ -41,11 +39,11 @@ public class Equals extends EsqlBinaryComparison implements Negatable evaluatorMap; + private final BinaryComparisonOperation functionType; + + @FunctionalInterface + public interface BinaryOperatorConstructor { + EsqlBinaryComparison apply(Source source, Expression lhs, Expression rhs); + } + + public enum BinaryComparisonOperation implements Writeable { + + EQ(0, "==", BinaryComparisonProcessor.BinaryComparisonOperation.EQ, Equals::new), + // id 1 reserved for NullEquals + NEQ(2, "!=", BinaryComparisonProcessor.BinaryComparisonOperation.NEQ, NotEquals::new), + GT(3, ">", BinaryComparisonProcessor.BinaryComparisonOperation.GT, GreaterThan::new), + GTE(4, ">=", BinaryComparisonProcessor.BinaryComparisonOperation.GTE, GreaterThanOrEqual::new), + LT(5, "<", BinaryComparisonProcessor.BinaryComparisonOperation.LT, LessThan::new), + LTE(6, "<=", BinaryComparisonProcessor.BinaryComparisonOperation.LTE, LessThanOrEqual::new); + + private final int id; + private final String symbol; + // Temporary mapping to the old enum, to satisfy the superclass constructor signature. 
+ private final BinaryComparisonProcessor.BinaryComparisonOperation shim; + private final BinaryOperatorConstructor constructor; + + BinaryComparisonOperation( + int id, + String symbol, + BinaryComparisonProcessor.BinaryComparisonOperation shim, + BinaryOperatorConstructor constructor + ) { + this.id = id; + this.symbol = symbol; + this.shim = shim; + this.constructor = constructor; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(id); + } + + public static BinaryComparisonOperation readFromStream(StreamInput in) throws IOException { + int id = in.readVInt(); + for (BinaryComparisonOperation op : values()) { + if (op.id == id) { + return op; + } + } + throw new IOException("No BinaryComparisonOperation found for id [" + id + "]"); + } + + public EsqlBinaryComparison buildNewInstance(Source source, Expression lhs, Expression rhs) { + return constructor.apply(source, lhs, rhs); + } + } + protected EsqlBinaryComparison( Source source, Expression left, Expression right, - /* TODO: BinaryComparisonOperator is an enum with a bunch of functionality we don't really want. We should extract an interface and - create a symbol only version like we did for BinaryArithmeticOperation. Ideally, they could be the same class. 
- */ - BinaryComparisonProcessor.BinaryComparisonOperation operation, + BinaryComparisonOperation operation, Map evaluatorMap ) { this(source, left, right, operation, null, evaluatorMap); @@ -49,13 +105,18 @@ protected EsqlBinaryComparison( Source source, Expression left, Expression right, - BinaryComparisonProcessor.BinaryComparisonOperation operation, + BinaryComparisonOperation operation, // TODO: We are definitely not doing the right thing with this zoneId ZoneId zoneId, Map evaluatorMap ) { - super(source, left, right, operation, zoneId); + super(source, left, right, operation.shim, zoneId); this.evaluatorMap = evaluatorMap; + this.functionType = operation; + } + + public BinaryComparisonOperation getFunctionType() { + return functionType; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java index 3eca0e858acbf..09fb32add0f18 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java @@ -11,8 +11,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.Negatable; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -21,7 +19,7 @@ import java.time.ZoneId; import java.util.Map; -public class GreaterThan extends 
EsqlBinaryComparison implements Negatable { +public class GreaterThan extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.INTEGER, GreaterThanIntsEvaluator.Factory::new), Map.entry(DataTypes.DOUBLE, GreaterThanDoublesEvaluator.Factory::new), @@ -35,11 +33,11 @@ public class GreaterThan extends EsqlBinaryComparison implements Negatable { +public class GreaterThanOrEqual extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.INTEGER, GreaterThanOrEqualIntsEvaluator.Factory::new), Map.entry(DataTypes.DOUBLE, GreaterThanOrEqualDoublesEvaluator.Factory::new), @@ -35,11 +33,11 @@ public class GreaterThanOrEqual extends EsqlBinaryComparison implements Negatabl ); public GreaterThanOrEqual(Source source, Expression left, Expression right) { - super(source, left, right, BinaryComparisonProcessor.BinaryComparisonOperation.GTE, evaluatorMap); + super(source, left, right, BinaryComparisonOperation.GTE, evaluatorMap); } public GreaterThanOrEqual(Source source, Expression left, Expression right, ZoneId zoneId) { - super(source, left, right, BinaryComparisonProcessor.BinaryComparisonOperation.GTE, zoneId, evaluatorMap); + super(source, left, right, BinaryComparisonOperation.GTE, zoneId, evaluatorMap); } @Override @@ -63,7 +61,7 @@ public LessThan negate() { } @Override - public BinaryComparison reverse() { + public EsqlBinaryComparison reverse() { return new LessThanOrEqual(source(), left(), right(), zoneId()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 7b4e867adad91..cea88d3598c2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -16,6 +16,7 @@ import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.planner.Layout; @@ -24,8 +25,6 @@ import java.util.BitSet; import java.util.List; -import static org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper.EQUALS; - public class InMapper extends ExpressionMapper { public static final InMapper IN_MAPPER = new InMapper(); @@ -38,7 +37,7 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { List listEvaluators = new ArrayList<>(in.list().size()); in.list().forEach(e -> { Equals eq = new Equals(in.source(), in.value(), e); - ExpressionEvaluator.Factory eqEvaluator = ((ExpressionMapper) EQUALS).map(eq, layout); + ExpressionEvaluator.Factory eqEvaluator = EvalMapper.toEvaluator(eq, layout); listEvaluators.add(eqEvaluator); }); return dvrCtx -> new InExpressionEvaluator(dvrCtx, listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java index 6b82df1d67da6..1649706a643c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java @@ -11,8 +11,6 @@ import 
org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.Negatable; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -21,7 +19,7 @@ import java.time.ZoneId; import java.util.Map; -public class LessThan extends EsqlBinaryComparison implements Negatable { +public class LessThan extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.INTEGER, LessThanIntsEvaluator.Factory::new), @@ -35,8 +33,12 @@ public class LessThan extends EsqlBinaryComparison implements Negatable { +public class LessThanOrEqual extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.INTEGER, LessThanOrEqualIntsEvaluator.Factory::new), Map.entry(DataTypes.DOUBLE, LessThanOrEqualDoublesEvaluator.Factory::new), @@ -34,8 +32,12 @@ public class LessThanOrEqual extends EsqlBinaryComparison implements Negatable { +public class NotEquals extends EsqlBinaryComparison implements Negatable { private static final Map evaluatorMap = Map.ofEntries( Map.entry(DataTypes.BOOLEAN, NotEqualsBoolsEvaluator.Factory::new), Map.entry(DataTypes.INTEGER, NotEqualsIntsEvaluator.Factory::new), @@ -41,11 +39,11 @@ public class NotEquals extends EsqlBinaryComparison implements Negatable dt.isNumeric() && dt != DataTypes.UNSIGNED_LONG, sourceText(), DEFAULT, - "numeric except unsigned_long" + "numeric except unsigned_long or counter types" ); } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java index 7ce655bf59962..957f83453cac3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java @@ -9,7 +9,6 @@ import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountAggregatorFunction; -import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; @@ -17,6 +16,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.planner.ToAggregator; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.Nullability; @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; public class Count extends AggregateFunction implements EnclosedAgg, ToAggregator, SurrogateExpression { @@ -91,7 +92,7 @@ public Nullability nullable() { @Override protected TypeResolution resolveType() { - return EsqlTypeResolutions.isExact(field(), sourceText(), DEFAULT); + return isType(field(), dt -> EsqlDataTypes.isCounterType(dt) == false, sourceText(), DEFAULT, "any type except counter types"); } @Override diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 5e62102aceeaf..b63c070a90ec8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -90,7 +90,7 @@ protected TypeResolution resolveType() { dt -> resolved && dt != DataTypes.UNSIGNED_LONG, sourceText(), DEFAULT, - "any exact type except unsigned_long" + "any exact type except unsigned_long or counter types" ); if (resolution.unresolved() || precision == null) { return resolution; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java index eb602df21d9a0..8ca3889352e40 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java @@ -44,7 +44,7 @@ protected Expression.TypeResolution resolveType() { dt -> dt.isNumeric() && dt != DataTypes.UNSIGNED_LONG, sourceText(), DEFAULT, - "numeric except unsigned_long" + "numeric except unsigned_long or counter types" ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java index 8e1e38441e9a6..799ec58a18a5d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java @@ -40,7 +40,7 @@ protected TypeResolution resolveType() { sourceText(), DEFAULT, "datetime", - "numeric except unsigned_long" + "numeric except unsigned_long or counter types" ); } return isType( @@ -48,7 +48,7 @@ protected TypeResolution resolveType() { dt -> dt.isNumeric() && dt != DataTypes.UNSIGNED_LONG, sourceText(), DEFAULT, - "numeric except unsigned_long" + "numeric except unsigned_long or counter types" ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 218d469d626f9..32073d830841f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -93,7 +93,7 @@ public class Bucket extends GroupingFunction implements Validatable, TwoOptional @Example( description = """ `BUCKET` can work in two modes: one in which the size of the bucket is computed - based on a buckets count recommendation (four parameters) and a range and + based on a buckets count recommendation (four parameters) and a range, and another in which the bucket size is provided directly (two parameters). Using a target number of buckets, a start of a range, and an end of a range, @@ -127,8 +127,8 @@ another in which the bucket size is provided directly (two parameters). 
@Example(description = """ If the desired bucket size is known in advance, simply provide it as the second argument, leaving the range out:""", file = "bucket", tag = "docsBucketWeeklyHistogramWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, its type must be - of a time duration or date period type."""), + NOTE: When providing the bucket size as the second parameter, it must be a time + duration or date period."""), @Example( description = "`BUCKET` can also operate on numeric fields. For example, to create a salary histogram:", file = "bucket", @@ -138,10 +138,10 @@ another in which the bucket size is provided directly (two parameters). You have to find the `min` and `max` separately. {esql} doesn't yet have an easy way to do that automatically.""" ), @Example(description = """ - If the desired bucket size is known in advance, simply provide it as the second - argument, leaving the range out:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, its type must be - of a floating type."""), + The range can be omitted if the desired bucket size is known in advance. Simply + provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ + NOTE: When providing the bucket size as the second parameter, it must be + of a floating point type."""), @Example( description = "Create hourly buckets for the last 24 hours, and calculate the number of events per hour:", file = "bucket", @@ -151,6 +151,15 @@ another in which the bucket size is provided directly (two parameters). 
description = "Create monthly buckets for the year 1985, and calculate the average salary by hiring month", file = "bucket", tag = "bucket_in_agg" + ), + @Example( + description = """ + `BUCKET` may be used in both the aggregating and grouping part of the + <> command provided that in the aggregating + part the function is referenced by an alias defined in the + grouping part, or that it is invoked with the exact same expression:""", + file = "bucket", + tag = "reuseGroupingFunctionWithExpression" ) } ) public Bucket( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java index 0c0ee1e84c2ec..f6dae5bd0117f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ql.tree.Source; import java.util.List; +import java.util.Objects; public abstract class EsqlConfigurationFunction extends EsqlScalarFunction { @@ -25,4 +26,19 @@ protected EsqlConfigurationFunction(Source source, List fields, Conf public Configuration configuration() { return configuration; } + + @Override + public int hashCode() { + return Objects.hash(getClass(), children(), configuration); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + EsqlConfigurationFunction other = (EsqlConfigurationFunction) obj; + + return configuration.equals(other.configuration); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 806bd9b0a12e1..0a9b4a7b7d0f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.ql.type.DataType; import java.util.Arrays; -import java.util.Objects; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; @@ -47,18 +46,4 @@ public final Expression field() { public DataType dataType() { return field.dataType(); } - - @Override - public final int hashCode() { - return Objects.hash(field); - } - - @Override - public final boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } - UnaryScalarFunction other = (UnaryScalarFunction) obj; - return Objects.equals(other.field, field); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java index 74cf0c4c1deea..20cb46def4d8b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; @@ -42,7 +43,10 @@ 
public class ToDouble extends AbstractConvertFunction { Map.entry(TEXT, ToDoubleFromStringEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToDoubleFromUnsignedLongEvaluator.Factory::new), Map.entry(LONG, ToDoubleFromLongEvaluator.Factory::new), // CastLongToDoubleEvaluator would be a candidate, but not MV'd - Map.entry(INTEGER, ToDoubleFromIntEvaluator.Factory::new) // CastIntToDoubleEvaluator would be a candidate, but not MV'd + Map.entry(INTEGER, ToDoubleFromIntEvaluator.Factory::new), // CastIntToDoubleEvaluator would be a candidate, but not MV'd + Map.entry(EsqlDataTypes.COUNTER_DOUBLE, (field, source) -> field), + Map.entry(EsqlDataTypes.COUNTER_INTEGER, ToDoubleFromIntEvaluator.Factory::new), + Map.entry(EsqlDataTypes.COUNTER_LONG, ToDoubleFromLongEvaluator.Factory::new) ); @FunctionInfo( @@ -65,7 +69,18 @@ public ToDouble( Source source, @Param( name = "field", - type = { "boolean", "date", "keyword", "text", "double", "long", "unsigned_long", "integer" }, + type = { + "boolean", + "date", + "keyword", + "text", + "double", + "long", + "unsigned_long", + "integer", + "counter_double", + "counter_integer", + "counter_long" }, description = "Input value. The input can be a single- or multi-valued column or an expression." 
) Expression field ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java index d50f1f613b589..32e3b8a77695c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; @@ -43,7 +44,8 @@ public class ToInteger extends AbstractConvertFunction { Map.entry(TEXT, ToIntegerFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToIntegerFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToIntegerFromUnsignedLongEvaluator.Factory::new), - Map.entry(LONG, ToIntegerFromLongEvaluator.Factory::new) + Map.entry(LONG, ToIntegerFromLongEvaluator.Factory::new), + Map.entry(EsqlDataTypes.COUNTER_INTEGER, (fieldEval, source) -> fieldEval) ); @FunctionInfo( @@ -68,7 +70,7 @@ public ToInteger( Source source, @Param( name = "field", - type = { "boolean", "date", "keyword", "text", "double", "long", "unsigned_long", "integer" }, + type = { "boolean", "date", "keyword", "text", "double", "long", "unsigned_long", "integer", "counter_integer" }, description = "Input value. The input can be a single- or multi-valued column or an expression." 
) Expression field ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index 77973ec49b7e3..c7b77a3c7f2c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; @@ -43,7 +44,9 @@ public class ToLong extends AbstractConvertFunction { Map.entry(TEXT, ToLongFromStringEvaluator.Factory::new), Map.entry(DOUBLE, ToLongFromDoubleEvaluator.Factory::new), Map.entry(UNSIGNED_LONG, ToLongFromUnsignedLongEvaluator.Factory::new), - Map.entry(INTEGER, ToLongFromIntEvaluator.Factory::new) // CastIntToLongEvaluator would be a candidate, but not MV'd + Map.entry(INTEGER, ToLongFromIntEvaluator.Factory::new), // CastIntToLongEvaluator would be a candidate, but not MV'd + Map.entry(EsqlDataTypes.COUNTER_LONG, (field, source) -> field), + Map.entry(EsqlDataTypes.COUNTER_INTEGER, ToLongFromIntEvaluator.Factory::new) ); @FunctionInfo( @@ -67,7 +70,17 @@ public ToLong( Source source, @Param( name = "field", - type = { "boolean", "date", "keyword", "text", "double", "long", "unsigned_long", "integer" }, + type = { + "boolean", + "date", + "keyword", + "text", + "double", + "long", + "unsigned_long", + "integer", + "counter_integer", + "counter_long" }, description = "Input value. 
The input can be a single- or multi-valued column or an expression." ) Expression field ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java index cf6cfa5525dc6..ffe92c8c19b3f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.List; -import java.util.Objects; import java.util.function.Function; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; @@ -31,7 +30,8 @@ public class Log extends EsqlScalarFunction implements OptionalArgument { - private final Expression base, value; + private final Expression base; + private final Expression value; @FunctionInfo( returnType = "double", @@ -127,18 +127,4 @@ public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new LeftEvaluator.Factory( - source, + source(), context -> new BytesRef(), context -> new UnicodeUtil.UTF8CodePoint(), toEvaluator.apply(str), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java index 6469db23e6ddf..07810a7f9baff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java @@ -36,11 +36,7 @@ * {code right(foo, len)} is an alias to {code substring(foo, foo.length-len, len)} */ public class Right extends EsqlScalarFunction { - - private final Source source; - private final Expression str; - private 
final Expression length; @FunctionInfo( @@ -54,7 +50,6 @@ public Right( @Param(name = "length", type = { "integer" }, description = "The number of characters to return.") Expression length ) { super(source, Arrays.asList(str, length)); - this.source = source; this.str = str; this.length = length; } @@ -84,7 +79,7 @@ static BytesRef process( @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new RightEvaluator.Factory( - source, + source(), context -> new BytesRef(), context -> new UnicodeUtil.UTF8CodePoint(), toEvaluator.apply(str), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 01fbf9febe26a..71814e6e6ca59 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -20,6 +20,7 @@ import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEquals; @@ -136,7 +137,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import 
org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser; @@ -187,8 +187,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; @@ -316,13 +314,12 @@ public static List namedTypeEntries() { // NamedExpressions of(NamedExpression.class, Alias.class, PlanNamedTypes::writeAlias, PlanNamedTypes::readAlias), // BinaryComparison - of(BinaryComparison.class, Equals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, NullEquals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, NotEquals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, GreaterThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, GreaterThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, LessThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), - of(BinaryComparison.class, LessThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, Equals.class, PlanNamedTypes::writeBinComparison, 
PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, NotEquals.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, GreaterThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, GreaterThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, LessThan.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), + of(EsqlBinaryComparison.class, LessThanOrEqual.class, PlanNamedTypes::writeBinComparison, PlanNamedTypes::readBinComparison), // InsensitiveEquals of( InsensitiveEquals.class, @@ -1199,26 +1196,19 @@ static void writeUnsupportedEsField(PlanStreamOutput out, UnsupportedEsField uns // -- BinaryComparison - static BinaryComparison readBinComparison(PlanStreamInput in, String name) throws IOException { + static EsqlBinaryComparison readBinComparison(PlanStreamInput in, String name) throws IOException { var source = in.readSource(); - var operation = in.readEnum(BinaryComparisonProcessor.BinaryComparisonOperation.class); + EsqlBinaryComparison.BinaryComparisonOperation operation = EsqlBinaryComparison.BinaryComparisonOperation.readFromStream(in); var left = in.readExpression(); var right = in.readExpression(); + // TODO: Remove zoneId entirely var zoneId = in.readOptionalZoneId(); - return switch (operation) { - case EQ -> new Equals(source, left, right, zoneId); - case NULLEQ -> new NullEquals(source, left, right, zoneId); - case NEQ -> new NotEquals(source, left, right, zoneId); - case GT -> new GreaterThan(source, left, right, zoneId); - case GTE -> new GreaterThanOrEqual(source, left, right, zoneId); - case LT -> new LessThan(source, left, right, zoneId); - case LTE -> new LessThanOrEqual(source, left, right, zoneId); - }; - } - - static void writeBinComparison(PlanStreamOutput out, BinaryComparison binaryComparison) throws IOException { + 
return operation.buildNewInstance(source, left, right); + } + + static void writeBinComparison(PlanStreamOutput out, EsqlBinaryComparison binaryComparison) throws IOException { out.writeSource(binaryComparison.source()); - out.writeEnum(binaryComparison.function()); + binaryComparison.getFunctionType().writeTo(out); out.writeExpression(binaryComparison.left()); out.writeExpression(binaryComparison.right()); out.writeOptionalZoneId(binaryComparison.zoneId()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 3ea3bd54da135..e7285bae32408 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -344,7 +344,8 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case "version" -> TopNEncoder.VERSION; case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc" -> TopNEncoder.DEFAULT_SORTABLE; - case "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" -> TopNEncoder.DEFAULT_UNSORTABLE; + case "geo_point", "cartesian_point", "geo_shape", "cartesian_shape", "counter_long", "counter_integer", "counter_double" -> + TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point case "unsupported" -> TopNEncoder.UNSUPPORTED; default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 98bf932ce3af8..26c57f13e16c4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -251,13 +251,16 @@ public static ElementType toElementType(DataType dataType) { * For example, spatial types can be extracted into doc-values under specific conditions, otherwise they extract as BytesRef. */ public static ElementType toElementType(DataType dataType, MappedFieldType.FieldExtractPreference fieldExtractPreference) { - if (dataType == DataTypes.LONG || dataType == DataTypes.DATETIME || dataType == DataTypes.UNSIGNED_LONG) { + if (dataType == DataTypes.LONG + || dataType == DataTypes.DATETIME + || dataType == DataTypes.UNSIGNED_LONG + || dataType == EsqlDataTypes.COUNTER_LONG) { return ElementType.LONG; } - if (dataType == DataTypes.INTEGER) { + if (dataType == DataTypes.INTEGER || dataType == EsqlDataTypes.COUNTER_INTEGER) { return ElementType.INT; } - if (dataType == DataTypes.DOUBLE) { + if (dataType == DataTypes.DOUBLE || dataType == EsqlDataTypes.COUNTER_DOUBLE) { return ElementType.DOUBLE; } // unsupported fields are passed through as a BytesRef diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 89c7455baf885..b508e9a4f040c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -121,6 +121,11 @@ public class EsqlFeatures implements FeatureSpecification { */ public static final NodeFeature MV_ORDERING_SORTED_ASCENDING = new NodeFeature("esql.mv_ordering_sorted_ascending"); + /** + * Support for metrics counter fields + */ + public static final 
NodeFeature METRICS_COUNTER_FIELDS = new NodeFeature("esql.metrics_counter_fields"); + @Override public Set getFeatures() { return Set.of( @@ -139,7 +144,8 @@ public Set getFeatures() { ST_DISJOINT, STRING_LITERAL_AUTO_CASTING, CASTING_OPERATOR, - MV_ORDERING_SORTED_ASCENDING + MV_ORDERING_SORTED_ASCENDING, + METRICS_COUNTER_FIELDS ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index 2910a690bf8a0..e763d54a2dcf4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -10,7 +10,6 @@ import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypeRegistry; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.Collection; @@ -37,10 +36,10 @@ public Collection dataTypes() { @Override public DataType fromEs(String typeName, TimeSeriesParams.MetricType metricType) { if (metricType == TimeSeriesParams.MetricType.COUNTER) { - // Counter fields will be a counter type, for now they are unsupported - return DataTypes.UNSUPPORTED; + return EsqlDataTypes.getCounterType(typeName); + } else { + return EsqlDataTypes.fromName(typeName); } - return EsqlDataTypes.fromName(typeName); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index 468ffcc2cba2a..44f6844544698 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -51,6 +51,17 @@ public final class EsqlDataTypes { public static final 
DataType GEO_SHAPE = new DataType("geo_shape", Integer.MAX_VALUE, false, false, true); public static final DataType CARTESIAN_SHAPE = new DataType("cartesian_shape", Integer.MAX_VALUE, false, false, true); + /** + * These are numeric fields labeled as metric counters in time-series indices. Although stored + * internally as numeric fields, they represent cumulative metrics and must not be treated as regular + * numeric fields. Therefore, we define them differently and separately from their parent numeric field. + * These fields are strictly for use in retrieval from indices, rate aggregation, and casting to their + * parent numeric type. + */ + public static final DataType COUNTER_LONG = new DataType("counter_long", LONG.size(), false, false, LONG.hasDocValues()); + public static final DataType COUNTER_INTEGER = new DataType("counter_integer", INTEGER.size(), false, false, INTEGER.hasDocValues()); + public static final DataType COUNTER_DOUBLE = new DataType("counter_double", DOUBLE.size(), false, false, DOUBLE.hasDocValues()); + private static final Collection TYPES = Stream.of( BOOLEAN, UNSUPPORTED, @@ -77,7 +88,10 @@ public final class EsqlDataTypes { GEO_POINT, CARTESIAN_POINT, CARTESIAN_SHAPE, - GEO_SHAPE + GEO_SHAPE, + COUNTER_LONG, + COUNTER_INTEGER, + COUNTER_DOUBLE ).sorted(Comparator.comparing(DataType::typeName)).toList(); private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); @@ -212,7 +226,8 @@ public static boolean isRepresentable(DataType t) { && t != FLOAT && t != SCALED_FLOAT && t != SOURCE - && t != HALF_FLOAT; + && t != HALF_FLOAT + && isCounterType(t) == false; } public static boolean areCompatible(DataType left, DataType right) { @@ -232,4 +247,17 @@ public static DataType widenSmallNumericTypes(DataType type) { } return type; } + + public static DataType getCounterType(String typeName) { + final DataType rootType = widenSmallNumericTypes(fromName(typeName)); + if (rootType == UNSUPPORTED) 
{ + return rootType; + } + assert rootType == LONG || rootType == INTEGER || rootType == DOUBLE : rootType; + return fromTypeName("counter_" + rootType.typeName()); + } + + public static boolean isCounterType(DataType dt) { + return dt == COUNTER_LONG || dt == COUNTER_INTEGER || dt == COUNTER_DOUBLE; + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index bfe4cbc6184ea..06a9319079087 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -446,7 +446,7 @@ private Throwable reworkException(Throwable th) { } // Asserts that the serialization and deserialization of the plan creates an equivalent plan. - private static void opportunisticallyAssertPlanSerialization(PhysicalPlan... plans) { + private void opportunisticallyAssertPlanSerialization(PhysicalPlan... plans) { for (var plan : plans) { var tmp = plan; do { @@ -455,7 +455,7 @@ private static void opportunisticallyAssertPlanSerialization(PhysicalPlan... 
pla } } while (tmp.children().isEmpty() == false && (tmp = tmp.children().get(0)) != null); - SerializationTestUtils.assertSerialization(plan); + SerializationTestUtils.assertSerialization(plan, configuration); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index 4be95b95afe54..312250d2f58d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -41,7 +41,16 @@ public class SerializationTestUtils { private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry(); public static void assertSerialization(PhysicalPlan plan) { - var deserPlan = serializeDeserialize(plan, PlanStreamOutput::writePhysicalPlanNode, PlanStreamInput::readPhysicalPlanNode); + assertSerialization(plan, EsqlTestUtils.TEST_CFG); + } + + public static void assertSerialization(PhysicalPlan plan, EsqlConfiguration configuration) { + var deserPlan = serializeDeserialize( + plan, + PlanStreamOutput::writePhysicalPlanNode, + PlanStreamInput::readPhysicalPlanNode, + configuration + ); EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan); } @@ -51,7 +60,16 @@ public static void assertSerialization(LogicalPlan plan) { } public static void assertSerialization(Expression expression) { - Expression deserExpression = serializeDeserialize(expression, PlanStreamOutput::writeExpression, PlanStreamInput::readExpression); + assertSerialization(expression, EsqlTestUtils.TEST_CFG); + } + + public static void assertSerialization(Expression expression, EsqlConfiguration configuration) { + Expression deserExpression = serializeDeserialize( + expression, + PlanStreamOutput::writeExpression, + PlanStreamInput::readExpression, + configuration + ); 
EqualsHashCodeTestUtils.checkEqualsAndHashCode(expression, unused -> deserExpression); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 0e2886d099916..79939365181aa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -143,9 +143,9 @@ private Page randomPage(List columns) { return new Page(columns.stream().map(c -> { Block.Builder builder = PlannerUtils.toElementType(EsqlDataTypes.fromName(c.type())).newBlockBuilder(1, blockFactory); switch (c.type()) { - case "unsigned_long", "long" -> ((LongBlock.Builder) builder).appendLong(randomLong()); - case "integer" -> ((IntBlock.Builder) builder).appendInt(randomInt()); - case "double" -> ((DoubleBlock.Builder) builder).appendDouble(randomDouble()); + case "unsigned_long", "long", "counter_long" -> ((LongBlock.Builder) builder).appendLong(randomLong()); + case "integer", "counter_integer" -> ((IntBlock.Builder) builder).appendInt(randomInt()); + case "double", "counter_double" -> ((DoubleBlock.Builder) builder).appendDouble(randomDouble()); case "keyword" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(randomAlphaOfLength(10))); case "text" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(randomAlphaOfLength(10000))); case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index ad8cb1003eeaa..a94cba52f8f0a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -134,4 +134,8 @@ public static void loadEnrichPolicyResolution( public static void loadEnrichPolicyResolution(EnrichResolution enrich, String policy, String field, String index, String mapping) { loadEnrichPolicyResolution(enrich, EnrichPolicy.MATCH_TYPE, policy, field, index, mapping); } + + public static IndexResolution tsdbIndexResolution() { + return loadMapping("tsdb-mapping.json", "test"); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 8f474e6cb6a83..3757720cc203a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -59,6 +60,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; @@ -69,6 +72,7 @@ import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzer; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.tsdbIndexResolution; import static 
org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -1626,6 +1630,22 @@ public void testChainedEvalFieldsUse() { assertProjection(query + " | keep x*", IntStream.range(0, additionalEvals + 3).mapToObj(v -> "x" + v).toArray(String[]::new)); } + public void testCounterTypes() { + var query = "FROM test | KEEP network.* | LIMIT 10"; + Analyzer analyzer = analyzer(tsdbIndexResolution()); + LogicalPlan plan = analyze(query, analyzer); + var limit = as(plan, Limit.class); + var attributes = limit.output().stream().collect(Collectors.toMap(NamedExpression::name, a -> a)); + assertThat( + attributes.keySet(), + equalTo(Set.of("network.connections", "network.bytes_in", "network.bytes_out", "network.message_in")) + ); + assertThat(attributes.get("network.connections").dataType(), equalTo(DataTypes.LONG)); + assertThat(attributes.get("network.bytes_in").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.bytes_out").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.message_in").dataType(), equalTo(EsqlDataTypes.COUNTER_DOUBLE)); + } + public void testMissingAttributeException_InChainedEval() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test @@ -1777,45 +1797,45 @@ public void testDeferredGroupingInStats() { } public void testUnsupportedTypesInStats() { - verifyUnsupported( - """ - row x = to_unsigned_long(\"10\") - | stats avg(x), count_distinct(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) - """, - "Found 8 problems\n" - + "line 2:12: argument of [avg(x)] must be [numeric except unsigned_long], found value [x] type [unsigned_long]\n" - + "line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long], " - + "found value [x] type [unsigned_long]\n" - + "line 2:39: argument of [max(x)] must be [datetime or numeric 
except unsigned_long], " - + "found value [max(x)] type [unsigned_long]\n" - + "line 2:47: argument of [median(x)] must be [numeric except unsigned_long], found value [x] type [unsigned_long]\n" - + "line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long], " - + "found value [x] type [unsigned_long]\n" - + "line 2:88: argument of [min(x)] must be [datetime or numeric except unsigned_long], " - + "found value [min(x)] type [unsigned_long]\n" - + "line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], " - + "found value [x] type [unsigned_long]\n" - + "line 2:115: argument of [sum(x)] must be [numeric except unsigned_long], found value [x] type [unsigned_long]" - ); + verifyUnsupported(""" + row x = to_unsigned_long(\"10\") + | stats avg(x), count_distinct(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) + """, """ + Found 8 problems + line 2:12: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [unsigned_long] + line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long or counter types],\ + found value [x] type [unsigned_long] + line 2:39: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\ + found value [max(x)] type [unsigned_long] + line 2:47: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [unsigned_long] + line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [unsigned_long] + line 2:88: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\ + found value [min(x)] type [unsigned_long] + line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\ + found value [x] type [unsigned_long] + line 2:115: argument of [sum(x)] must be [numeric 
except unsigned_long or counter types],\ + found value [x] type [unsigned_long]"""); - verifyUnsupported( - """ - row x = to_version("1.2") - | stats avg(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) - """, - "Found 7 problems\n" - + "line 2:10: argument of [avg(x)] must be [numeric except unsigned_long], found value [x] type [version]\n" - + "line 2:18: argument of [max(x)] must be [datetime or numeric except unsigned_long], " - + "found value [max(x)] type [version]\n" - + "line 2:26: argument of [median(x)] must be [numeric except unsigned_long], found value [x] type [version]\n" - + "line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long], " - + "found value [x] type [version]\n" - + "line 2:67: argument of [min(x)] must be [datetime or numeric except unsigned_long], " - + "found value [min(x)] type [version]\n" - + "line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], " - + "found value [x] type [version]\n" - + "line 2:94: argument of [sum(x)] must be [numeric except unsigned_long], found value [x] type [version]" - ); + verifyUnsupported(""" + row x = to_version("1.2") + | stats avg(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) + """, """ + Found 7 problems + line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [version] + line 2:18: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\ + found value [max(x)] type [version] + line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [version] + line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ + found value [x] type [version] + line 2:67: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\ + found value 
[min(x)] type [version] + line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] + line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); } public void testInOnText() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 8d9140cdda5f4..f563e1a6cb25c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -21,12 +21,14 @@ import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; //@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") public class VerifierTests extends ESTestCase { private static final EsqlParser parser = new EsqlParser(); private final Analyzer defaultAnalyzer = AnalyzerTestUtils.expandedDefaultAnalyzer(); + private final Analyzer tsdb = AnalyzerTestUtils.analyzer(AnalyzerTestUtils.tsdbIndexResolution()); public void testIncompatibleTypesInMathOperation() { assertEquals( @@ -72,7 +74,8 @@ public void testAggsExpressionsInStatsAggs() { error("from test | stats max(max(salary)) by first_name") ); assertEquals( - "1:25: argument of [avg(first_name)] must be [numeric except unsigned_long], found value [first_name] type [keyword]", + "1:25: argument of [avg(first_name)] must be [numeric except unsigned_long or counter types]," + + " found value [first_name] type [keyword]", error("from test | stats count(avg(first_name)) by first_name") ); assertEquals( @@ -378,7 +381,8 @@ public void 
testUnsignedLongNegation() { public void testSumOnDate() { assertEquals( - "1:19: argument of [sum(hire_date)] must be [numeric except unsigned_long], found value [hire_date] type [datetime]", + "1:19: argument of [sum(hire_date)] must be [numeric except unsigned_long or counter types]," + + " found value [hire_date] type [datetime]", error("from test | stats sum(hire_date)") ); } @@ -480,6 +484,39 @@ public void testInlineImpossibleConvert() { assertEquals("1:5: argument of [false::ip] must be [ip or string], found value [false] type [boolean]", error("ROW false::ip")); } + public void testAggregateOnCounter() { + assertThat( + error("FROM tests | STATS min(network.bytes_in)", tsdb), + equalTo( + "1:20: argument of [min(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types]," + + " found value [min(network.bytes_in)] type [counter_long]" + ) + ); + + assertThat( + error("FROM tests | STATS max(network.bytes_in)", tsdb), + equalTo( + "1:20: argument of [max(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types]," + + " found value [max(network.bytes_in)] type [counter_long]" + ) + ); + + assertThat( + error("FROM tests | STATS count(network.bytes_out)", tsdb), + equalTo( + "1:20: argument of [count(network.bytes_out)] must be [any type except counter types]," + + " found value [network.bytes_out] type [counter_long]" + ) + ); + } + + public void testGroupByCounter() { + assertThat( + error("FROM tests | STATS count(*) BY network.bytes_in", tsdb), + equalTo("1:32: cannot group by on [counter_long] type for grouping [network.bytes_in]") + ); + } + private String error(String query) { return error(query, defaultAnalyzer); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java new 
file mode 100644 index 0000000000000..5e9e702ff8d12 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; + +import java.io.IOException; +import java.util.List; + +public class EsqlBinaryComparisonTests extends ESTestCase { + + public void testSerializationOfBinaryComparisonOperation() throws IOException { + for (BinaryComparisonOperation op : BinaryComparisonOperation.values()) { + BinaryComparisonOperation newOp = copyWriteable( + op, + new NamedWriteableRegistry(List.of()), + BinaryComparisonOperation::readFromStream + ); + assertEquals(op, newOp); + } + } + + /** + * Test that a serialized + * {@link org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation} + * can be read back as a + * {@link BinaryComparisonOperation} + */ + public void testCompatibleWithQLBinaryComparisonOperation() throws IOException { + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.EQ, 
BinaryComparisonOperation.EQ); + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.NEQ, BinaryComparisonOperation.NEQ); + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.GT, BinaryComparisonOperation.GT); + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.GTE, BinaryComparisonOperation.GTE); + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.LT, BinaryComparisonOperation.LT); + validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.LTE, BinaryComparisonOperation.LTE); + } + + private static void validateCompatibility( + BinaryComparisonProcessor.BinaryComparisonOperation original, + BinaryComparisonOperation expected + ) throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setTransportVersion(TransportVersion.current()); + output.writeEnum(original); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), new NamedWriteableRegistry(List.of()))) { + in.setTransportVersion(TransportVersion.current()); + BinaryComparisonOperation newOp = BinaryComparisonOperation.readFromStream(in); + assertEquals(expected, newOp); + } + } + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 3e1fbaa2940eb..772dea0ef4557 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -121,11 +121,11 @@ public static Literal randomLiteral(DataType type) { case "boolean" -> randomBoolean(); case "byte" -> randomByte(); case "short" -> randomShort(); - case "integer" -> randomInt(); - case "unsigned_long", "long" -> 
randomLong(); + case "integer", "counter_integer" -> randomInt(); + case "unsigned_long", "long", "counter_long" -> randomLong(); case "date_period" -> Period.of(randomIntBetween(-1000, 1000), randomIntBetween(-13, 13), randomIntBetween(-32, 32)); case "datetime" -> randomMillisUpToYear9999(); - case "double", "scaled_float" -> randomDouble(); + case "double", "scaled_float", "counter_double" -> randomDouble(); case "float" -> randomFloat(); case "half_float" -> HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(randomFloat())); case "keyword" -> new BytesRef(randomAlphaOfLength(5)); @@ -946,6 +946,57 @@ protected static String typeErrorMessage(boolean includeOrdinal, List args, EsqlConfiguration configuration); + + @Override + protected Expression build(Source source, List args) { + return buildWithConfiguration(source, args, EsqlTestUtils.TEST_CFG); + } + + static EsqlConfiguration randomConfiguration() { + // TODO: Randomize the query and maybe the pragmas. + return new EsqlConfiguration( + randomZone(), + randomLocale(random()), + randomBoolean() ? null : randomAlphaOfLength(randomInt(64)), + randomBoolean() ? 
null : randomAlphaOfLength(randomInt(64)), + QueryPragmas.EMPTY, + EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), + StringUtils.EMPTY, + randomBoolean() + ); + } + + public void testSerializationWithConfiguration() { + EsqlConfiguration config = randomConfiguration(); + Expression expr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), config); + + assertSerialization(expr, config); + + EsqlConfiguration differentConfig; + do { + differentConfig = randomConfiguration(); + } while (config.equals(differentConfig)); + + Expression differentExpr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), differentConfig); + assertFalse(expr.equals(differentExpr)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index 22a00bb3684a6..5527ae4e81bbe 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -11,9 +11,11 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -113,6 +115,31 @@ public 
static Iterable parameters() { List.of() ); + TestCaseSupplier.unary( + suppliers, + "Attribute[channel=0]", + List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomDouble, EsqlDataTypes.COUNTER_DOUBLE)), + DataTypes.DOUBLE, + l -> l, + List.of() + ); + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("Integer"), + List.of(new TestCaseSupplier.TypedDataSupplier("counter", () -> randomInt(1000), EsqlDataTypes.COUNTER_INTEGER)), + DataTypes.DOUBLE, + l -> ((Integer) l).doubleValue(), + List.of() + ); + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("Long"), + List.of(new TestCaseSupplier.TypedDataSupplier("counter", () -> randomLongBetween(1, 1000), EsqlDataTypes.COUNTER_LONG)), + DataTypes.DOUBLE, + l -> ((Long) l).doubleValue(), + List.of() + ); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index e6f6cb7e978f7..bc27ded5a6dae 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -257,6 +258,15 @@ public static Iterable parameters() { ) ); + TestCaseSupplier.unary( + suppliers, + "Attribute[channel=0]", 
+ List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomInt, EsqlDataTypes.COUNTER_INTEGER)), + DataTypes.INTEGER, + l -> l, + List.of() + ); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 1879b7ce97ea8..3b123344b4b11 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -11,8 +11,10 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -208,6 +210,22 @@ public static Iterable parameters() { ) ); + TestCaseSupplier.unary( + suppliers, + "Attribute[channel=0]", + List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomNonNegativeLong, EsqlDataTypes.COUNTER_LONG)), + DataTypes.LONG, + l -> l, + List.of() + ); + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("Integer"), + List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomInt, EsqlDataTypes.COUNTER_INTEGER)), + DataTypes.LONG, + l -> ((Integer) l).longValue(), + List.of() + ); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index 7a65c8d468644..d862a07c2fd0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -13,8 +13,9 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -31,7 +32,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class DateExtractTests extends AbstractFunctionTestCase { +public class DateExtractTests extends AbstractConfigurationFunctionTestCase { public DateExtractTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -128,7 +129,7 @@ public void testInvalidChrono() { } @Override - protected Expression build(Source source, List args) { - return new DateExtract(source, args.get(0), args.get(1), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new DateExtract(source, args.get(0), args.get(1), configuration); } } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java index ee4152db2856a..c6c544fced4c4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java @@ -12,9 +12,9 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class DateFormatTests extends AbstractFunctionTestCase { +public class DateFormatTests extends AbstractConfigurationFunctionTestCase { public DateFormatTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -67,7 +67,7 @@ public static Iterable parameters() { } @Override - protected Expression build(Source source, List args) { - return new DateFormat(source, args.get(0), args.get(1), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new DateFormat(source, args.get(0), args.get(1), configuration); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index bd9205c930d51..2e0494723a518 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ToLowerTests extends AbstractFunctionTestCase { +public class ToLowerTests extends AbstractConfigurationFunctionTestCase { public ToLowerTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -72,8 +72,8 @@ private EsqlConfiguration randomLocaleConfig() { } @Override - protected Expression build(Source source, List args) { - return new ToLower(source, args.get(0), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new ToLower(source, args.get(0), configuration); } private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index ce7c011f201d8..f5d0283d0691b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ToUpperTests extends AbstractFunctionTestCase { +public class ToUpperTests extends AbstractConfigurationFunctionTestCase { public ToUpperTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -72,8 +72,8 @@ private EsqlConfiguration randomLocaleConfig() { } @Override - protected Expression build(Source source, List args) { - return new ToUpper(source, args.get(0), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new ToUpper(source, args.get(0), configuration); } private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index 
57d86147a5bba..e22fa3c66384b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.SerializationTestUtils; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; @@ -45,7 +46,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -86,7 +86,6 @@ import org.elasticsearch.xpack.ql.expression.function.Function; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.options.EsSourceOptions; import org.elasticsearch.xpack.ql.plan.logical.Filter; @@ -103,10 +102,8 @@ import org.elasticsearch.xpack.ql.type.KeywordEsField; import org.elasticsearch.xpack.ql.type.TextEsField; 
import org.elasticsearch.xpack.ql.type.UnsupportedEsField; -import org.elasticsearch.xpack.ql.util.DateUtils; import java.io.IOException; -import java.time.ZoneId; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -334,15 +331,15 @@ public void testBinComparisonSimple() throws IOException { var orig = new Equals(Source.EMPTY, field("foo", DataTypes.DOUBLE), field("bar", DataTypes.DOUBLE)); BytesStreamOutput bso = new BytesStreamOutput(); PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); - out.writeNamed(BinaryComparison.class, orig); - var deser = (Equals) planStreamInput(bso).readNamed(BinaryComparison.class); + out.writeNamed(EsqlBinaryComparison.class, orig); + var deser = (Equals) planStreamInput(bso).readNamed(EsqlBinaryComparison.class); EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); } public void testBinComparison() { Stream.generate(PlanNamedTypesTests::randomBinaryComparison) .limit(100) - .forEach(obj -> assertNamedType(BinaryComparison.class, obj)); + .forEach(obj -> assertNamedType(EsqlBinaryComparison.class, obj)); } public void testAggFunctionSimple() throws IOException { @@ -582,18 +579,17 @@ static InvalidMappedField randomInvalidMappedField() { ); } - static BinaryComparison randomBinaryComparison() { - int v = randomIntBetween(0, 6); + static EsqlBinaryComparison randomBinaryComparison() { + int v = randomIntBetween(0, 5); var left = field(randomName(), randomDataType()); var right = field(randomName(), randomDataType()); return switch (v) { - case 0 -> new Equals(Source.EMPTY, left, right, zoneIdOrNull()); - case 1 -> new NullEquals(Source.EMPTY, left, right, zoneIdOrNull()); - case 2 -> new NotEquals(Source.EMPTY, left, right, zoneIdOrNull()); - case 3 -> new GreaterThan(Source.EMPTY, left, right, zoneIdOrNull()); - case 4 -> new GreaterThanOrEqual(Source.EMPTY, left, right, zoneIdOrNull()); - case 5 -> new LessThan(Source.EMPTY, left, right, zoneIdOrNull()); - case 6 -> 
new LessThanOrEqual(Source.EMPTY, left, right, zoneIdOrNull()); + case 0 -> new Equals(Source.EMPTY, left, right); + case 1 -> new NotEquals(Source.EMPTY, left, right); + case 2 -> new GreaterThan(Source.EMPTY, left, right); + case 3 -> new GreaterThanOrEqual(Source.EMPTY, left, right); + case 4 -> new LessThan(Source.EMPTY, left, right); + case 5 -> new LessThanOrEqual(Source.EMPTY, left, right); default -> throw new AssertionError(v); }; } @@ -635,10 +631,6 @@ static NameId nameIdOrNull() { return randomBoolean() ? new NameId() : null; } - static ZoneId zoneIdOrNull() { - return randomBoolean() ? DateUtils.UTC : null; - } - static Nullability randomNullability() { int i = randomInt(2); return switch (i) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 1d2b11d3deb89..7f8124bec6895 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -152,7 +152,7 @@ public void testEvaluatorSuppliers() { // Test serialization of expressions, since we have convenient access to some expressions. 
public void testExpressionSerialization() { - SerializationTestUtils.assertSerialization(expression); + SerializationTestUtils.assertSerialization(expression, TEST_CONFIG); } private static FieldAttribute field(String name, DataType type) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java index 93f58398d267f..23d2f8da488e1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java @@ -23,8 +23,12 @@ import static org.hamcrest.Matchers.equalTo; public class EsqlDataTypeRegistryTests extends ESTestCase { + public void testCounter() { - resolve("long", TimeSeriesParams.MetricType.COUNTER, DataTypes.UNSUPPORTED); + resolve("long", TimeSeriesParams.MetricType.COUNTER, EsqlDataTypes.COUNTER_LONG); + resolve("integer", TimeSeriesParams.MetricType.COUNTER, EsqlDataTypes.COUNTER_INTEGER); + resolve("double", TimeSeriesParams.MetricType.COUNTER, EsqlDataTypes.COUNTER_DOUBLE); + } public void testGauge() { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index ba4e1d98f63a6..43fc8c63c654b 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import 
org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -230,7 +231,9 @@ public void testRetryPointInTime() throws Exception { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest(indexName).indicesOptions( IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + .actionGet() + .getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), searchResponse -> { assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); @@ -277,7 +280,9 @@ public void testPointInTimeWithDeletedIndices() { IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + .actionGet() + .getPointInTimeId(); try { indicesAdmin().prepareDelete("index-1").get(); // Return partial results if allow partial search result is allowed @@ -312,7 +317,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest("test-*").indicesOptions( IndicesOptions.strictExpandOpenAndForbidClosed() ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + final BytesReference pitId = 
client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) .actionGet() .getPointInTimeId(); try { @@ -329,7 +334,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest("test-*").keepAlive( TimeValue.timeValueMinutes(2) ); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) .actionGet() .getPointInTimeId(); try { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index e934cc3fcc8b2..d1db706562a37 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -83,7 +84,7 @@ protected Collection> getPlugins() { return pluginList(FrozenIndices.class, LocalStateCompositeXPackPlugin.class); } - String openReaders(TimeValue keepAlive, String... indices) { + BytesReference openReaders(TimeValue keepAlive, String... 
indices) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); @@ -145,7 +146,7 @@ public void testCloseFreezeAndOpen() throws Exception { } client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - String pitId = openReaders(TimeValue.timeValueMinutes(1), indexName); + BytesReference pitId = openReaders(TimeValue.timeValueMinutes(1), indexName); try { for (int from = 0; from < 3; from++) { assertResponse( diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java index 29c008d4c3128..4c63ef72adcb5 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java @@ -26,6 +26,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.GONE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public final class RestFreezeIndexAction extends BaseRestHandler { @@ -49,7 +50,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli // translate to a get indices request, so that we'll 404 on non-existent indices final GetIndexRequest getIndexRequest = new GetIndexRequest(); getIndexRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); - getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); + getIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); 
getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<>(channel) { @Override @@ -63,7 +64,7 @@ public RestResponse buildResponse(GetIndexResponse getIndexResponse, XContentBui FreezeRequest freezeRequest = new FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); freezeRequest.ackTimeout(request.paramAsTime("timeout", freezeRequest.ackTimeout())); - freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); + freezeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java index b3ae0f9707c83..861abaf6f5893 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java @@ -27,7 +27,7 @@ public void testRecoverFromFrozenPrimary() throws IOException { indexDoc(indexShard, "_doc", "1"); indexDoc(indexShard, "_doc", "2"); indexDoc(indexShard, "_doc", "3"); - indexShard.close("test", true); + flushAndCloseShardNoCheck(indexShard); final ShardRouting shardRouting = indexShard.routingEntry(); IndexShard frozenShard = reinitShard( indexShard, diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java 
b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java index 10d636c0cf851..39b04d8915b89 100644 --- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java +++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -30,7 +29,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -53,7 +51,7 @@ public class SamlServiceProviderIndexTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class); + return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class, IndexTemplateRegistryPlugin.class); } @Override @@ -82,11 +80,6 @@ public void testWriteAndFindServiceProvidersFromIndex() { final int count = randomIntBetween(3, 5); List documents = new ArrayList<>(count); - // Install the template - assertTrue("Template should have been installed", installTemplate()); - // No need to install it again - assertFalse("Template should not have been installed a second time", installTemplate()); - // Index should not exist yet assertThat(clusterService.state().metadata().index(SamlServiceProviderIndex.INDEX_NAME), nullValue()); @@ -128,7 +121,6 @@ public void 
testWriteAndFindServiceProvidersFromIndex() { } public void testWritesViaAliasIfItExists() { - assertTrue(installTemplate()); // Create an index that will trigger the template, but isn't the standard index name final String customIndexName = SamlServiceProviderIndex.INDEX_NAME + "-test"; @@ -155,38 +147,6 @@ public void testWritesViaAliasIfItExists() { assertThat(readDocument(document.docId), equalTo(document)); } - public void testInstallTemplateAutomaticallyOnClusterChange() throws Exception { - // Create an index that will trigger a cluster state change - final String indexName = randomAlphaOfLength(7).toLowerCase(Locale.ROOT); - indicesAdmin().create(new CreateIndexRequest(indexName)).actionGet(); - - ensureGreen(indexName); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - - assertBusy(() -> assertThat("template should have been installed", templateMeta, notNullValue())); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - public void testInstallTemplateAutomaticallyOnDocumentWrite() { - final SamlServiceProviderDocument doc = randomDocument(1); - writeDocument(doc); - - assertThat(readDocument(doc.docId), equalTo(doc)); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - assertThat("template should have been installed", templateMeta, notNullValue()); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - private boolean installTemplate() { - final PlainActionFuture installTemplate = new PlainActionFuture<>(); - serviceProviderIndex.installIndexTemplate(assertListenerIsOnlyCalledOnce(installTemplate)); - return installTemplate.actionGet(); - } - private Set getAllDocs() { final PlainActionFuture> future = new PlainActionFuture<>(); serviceProviderIndex.findAll( @@ -264,4 +224,21 @@ private static 
ActionListener assertListenerIsOnlyCalledOnce(ActionListen }); } + // Since we just want to test the template handling in this test suite, we don't need to go through + // all the hassle of the setup required to *actually* enable the plugin (we do that elsewhere), we + // just need to make sure the template registry is here. + public static class IndexTemplateRegistryPlugin extends Plugin { + @Override + public Collection createComponents(PluginServices services) { + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + return List.of(indexTemplateRegistry); + } + } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java index e493c8e61ca58..5e6bc5f703879 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.idp.saml.rest.action.RestSamlValidateAuthenticationRequestAction; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderFactory; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex; +import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndexTemplateRegistry; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderResolver; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderCacheSettings; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderDefaults; @@ -80,6 +81,15 @@ public Collection createComponents(PluginServices services) { return List.of(); } + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + 
services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + SamlInit.initialize(); final SamlServiceProviderIndex index = new SamlServiceProviderIndex(services.client(), services.clusterService()); final SecurityContext securityContext = new SecurityContext(settings, services.threadPool().getThreadContext()); @@ -111,7 +121,7 @@ public Collection createComponents(PluginServices services) { final SamlFactory factory = new SamlFactory(); - return List.of(index, idp, factory, userPrivilegeResolver); + return List.of(index, idp, factory, userPrivilegeResolver, indexTemplateRegistry); } @Override diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index bd425487b9ad0..1eb6c5586a48b 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -9,12 +9,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; @@ -35,7 +33,6 @@ import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import 
org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilder; @@ -46,7 +43,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.template.TemplateUtils; import java.io.ByteArrayOutputStream; import java.io.Closeable; @@ -70,15 +66,19 @@ public class SamlServiceProviderIndex implements Closeable { private final ClusterService clusterService; private final ClusterStateListener clusterStateListener; private volatile boolean aliasExists; - private volatile boolean templateInstalled; public static final String ALIAS_NAME = "saml-service-provider"; public static final String INDEX_NAME = "saml-service-provider-v1"; static final String TEMPLATE_NAME = ALIAS_NAME; - private static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; - private static final String TEMPLATE_META_VERSION_KEY = "idp-version"; - private static final String TEMPLATE_VERSION_SUBSTITUTE = "idp.template.version"; + static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; + static final String TEMPLATE_VERSION_VARIABLE = "idp.template.version"; + + // This field is only populated with an old-school version string for BWC purposes + static final String TEMPLATE_VERSION_STRING_DEPRECATED = "idp.template.version_deprecated"; + static final String FINAL_TEMPLATE_VERSION_STRING_DEPRECATED = "8.14.0"; + + static final int CURRENT_TEMPLATE_VERSION = 1; public static final class DocumentVersion { public final String id; @@ -140,34 +140,9 @@ public SamlServiceProviderIndex(Client client, ClusterService clusterService) { private void clusterChanged(ClusterChangedEvent clusterChangedEvent) { final ClusterState state = 
clusterChangedEvent.state(); - installTemplateIfRequired(state); checkForAliasStateChange(state); } - private void installTemplateIfRequired(ClusterState state) { - if (templateInstalled) { - return; - } - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - if (isTemplateUpToDate(state)) { - templateInstalled = true; - return; - } - if (state.nodes().isLocalNodeElectedMaster() == false) { - return; - } - installIndexTemplate(ActionListener.wrap(installed -> { - templateInstalled = true; - if (installed) { - logger.debug("Template [{}] has been updated", TEMPLATE_NAME); - } else { - logger.debug("Template [{}] appears to be up to date", TEMPLATE_NAME); - } - }, e -> logger.warn(() -> "Failed to install template [" + TEMPLATE_NAME + "]", e))); - } - private void checkForAliasStateChange(ClusterState state) { final IndexAbstraction aliasInfo = state.getMetadata().getIndicesLookup().get(ALIAS_NAME); final boolean previousState = aliasExists; @@ -199,24 +174,6 @@ private void logChangedAliasState(IndexAbstraction aliasInfo) { } } - public void installIndexTemplate(ActionListener listener) { - final ClusterState state = clusterService.state(); - if (isTemplateUpToDate(state)) { - listener.onResponse(false); - return; - } - final String template = TemplateUtils.loadTemplate(TEMPLATE_RESOURCE, Version.CURRENT.toString(), TEMPLATE_VERSION_SUBSTITUTE); - final PutIndexTemplateRequest request = new PutIndexTemplateRequest(TEMPLATE_NAME).source(template, XContentType.JSON); - client.admin().indices().putTemplate(request, listener.delegateFailureAndWrap((l, response) -> { - logger.info("Installed template [{}]", TEMPLATE_NAME); - l.onResponse(true); - })); - } - - private boolean isTemplateUpToDate(ClusterState state) { - return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger); - } - public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, 
ActionListener listener) { final DeleteRequest request = new DeleteRequest(aliasExists ? ALIAS_NAME : INDEX_NAME).id(version.id) .setIfSeqNo(version.seqNo) @@ -240,19 +197,6 @@ public void writeDocument( return; } - if (templateInstalled) { - _writeDocument(document, opType, refreshPolicy, listener); - } else { - installIndexTemplate(listener.delegateFailureAndWrap((l, installed) -> _writeDocument(document, opType, refreshPolicy, l))); - } - } - - private void _writeDocument( - SamlServiceProviderDocument document, - DocWriteRequest.OpType opType, - WriteRequest.RefreshPolicy refreshPolicy, - ActionListener listener - ) { try ( ByteArrayOutputStream out = new ByteArrayOutputStream(); XContentBuilder xContentBuilder = new XContentBuilder(XContentType.JSON.xContent(), out) diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java new file mode 100644 index 0000000000000..bd6bdbabbd4f2 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.idp.saml.sp; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.template.IndexTemplateConfig; +import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.CURRENT_TEMPLATE_VERSION; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.FINAL_TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_NAME; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_RESOURCE; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_VARIABLE; + +public class SamlServiceProviderIndexTemplateRegistry extends IndexTemplateRegistry { + public SamlServiceProviderIndexTemplateRegistry( + Settings nodeSettings, + ClusterService clusterService, + ThreadPool threadPool, + Client client, + NamedXContentRegistry xContentRegistry + ) { + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + } + + @Override + protected String getOrigin() { + return "idp"; + } + + @Override + protected List getLegacyTemplateConfigs() { + return List.of( + new IndexTemplateConfig( + TEMPLATE_NAME, + TEMPLATE_RESOURCE, + CURRENT_TEMPLATE_VERSION, + TEMPLATE_VERSION_VARIABLE, + Map.of(TEMPLATE_VERSION_STRING_DEPRECATED, FINAL_TEMPLATE_VERSION_STRING_DEPRECATED) + ) + ); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java index 9cd598f85c00f..b6324ba671162 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteLifecycleAction extends BaseRestHandler { @@ -34,7 +35,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String lifecycleName = restRequest.param("name"); DeleteLifecycleAction.Request deleteLifecycleRequest = new DeleteLifecycleAction.Request(lifecycleName); deleteLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", deleteLifecycleRequest.ackTimeout())); - deleteLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteLifecycleRequest.masterNodeTimeout())); + deleteLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteLifecycleAction.INSTANCE, deleteLifecycleRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java index beae3f4d18194..195f989eab055 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestExplainLifecycleAction extends BaseRestHandler { @@ 
-40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); explainLifecycleRequest.onlyManaged(restRequest.paramAsBoolean("only_managed", false)); explainLifecycleRequest.onlyErrors(restRequest.paramAsBoolean("only_errors", false)); - explainLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", explainLifecycleRequest.masterNodeTimeout())); + explainLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ExplainLifecycleAction.INSTANCE, explainLifecycleRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java index 9631558649ed0..321d7b722c1c9 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestGetLifecycleAction extends BaseRestHandler { @@ -36,7 +37,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String[] lifecycleNames = Strings.splitStringByCommaToArray(restRequest.param("name")); GetLifecycleAction.Request getLifecycleRequest = new GetLifecycleAction.Request(lifecycleNames); getLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", getLifecycleRequest.ackTimeout())); - getLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", getLifecycleRequest.masterNodeTimeout())); + 
getLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> new RestCancellableNodeClient(client, restRequest.getHttpChannel()).execute( GetLifecycleAction.INSTANCE, diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java index 1721510fe7f87..91a201045ba61 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestGetStatusAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { AcknowledgedRequest.Plain request = new AcknowledgedRequest.Plain(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java index 6d9300f2c6088..f5834f9ae4e46 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestMoveToStepAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request = TransportMoveToStepAction.Request.parseRequest(index, parser); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.MOVE_TO_STEP, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java index 6240ba1a97574..8bd14b083a22d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient try (XContentParser parser = restRequest.contentParser()) { PutLifecycleRequest putLifecycleRequest = PutLifecycleRequest.parseRequest(lifecycleName, parser); putLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", putLifecycleRequest.ackTimeout())); - putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + putLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.PUT, putLifecycleRequest, new RestToXContentListener<>(channel)); } 
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java index 47e2cd0dc69a1..a011aa3d38b64 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestRemoveIndexLifecyclePolicyAction extends BaseRestHandler { @@ -35,7 +36,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); RemoveIndexLifecyclePolicyAction.Request changePolicyRequest = new RemoveIndexLifecyclePolicyAction.Request(indexes); - changePolicyRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", changePolicyRequest.masterNodeTimeout())); + changePolicyRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); changePolicyRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, changePolicyRequest.indicesOptions())); return channel -> client.execute( diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java index 87c6fd908ad0e..324266b420f25 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestRetryAction extends BaseRestHandler { @@ -37,7 +38,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); TransportRetryAction.Request request = new TransportRetryAction.Request(indices); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.indices(indices); request.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); return channel -> client.execute(ILMActions.RETRY, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java index 7be09625cc799..bbc359de090d7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestStartILMAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StartILMRequest request = new StartILMRequest(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.START, request, new 
RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java index cb71a7ecb10b9..93704e2ab824f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestStopAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StopILMRequest request = new StopILMRequest(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.STOP, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 169861381028c..8edbb7bc14f2c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -182,7 +182,7 @@ public OpenAiEmbeddingsServiceSettings(StreamInput in) throws IOException { } else { dimensionsSetByUser = false; } - if 
(in.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { modelId = in.readString(); } else { modelId = "unset"; @@ -322,7 +322,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.ML_DIMENSIONS_SET_BY_USER_ADDED)) { out.writeBoolean(dimensionsSetByUser); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeString(modelId); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java index e306f2d3d2928..b4cf9b27d0ff1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -68,7 +68,7 @@ public OpenAiEmbeddingsTaskSettings(@Nullable String user) { } public OpenAiEmbeddingsTaskSettings(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { this.user = in.readOptionalString(); } else { var discard = in.readString(); @@ -102,7 +102,7 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeOptionalString(user); } else { out.writeString("m"); // write 
any string diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java index 69cb974a4514f..33d5d5982d2b0 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -19,8 +18,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.xpack.core.common.notifications.Level; -import org.elasticsearch.xpack.core.ml.action.AuditMlNotificationAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelDefinitionPartAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelVocabularyAction; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ModelPackageConfig; @@ -59,7 +56,7 @@ public void doImport() throws URISyntaxException, IOException, ElasticsearchStat if (Strings.isNullOrEmpty(config.getVocabularyFile()) == false) { uploadVocabulary(); - writeDebugNotification(modelId, format("imported model vocabulary [%s]", config.getVocabularyFile())); + logger.debug(() -> format("[%s] imported model vocabulary [%s]", modelId, config.getVocabularyFile())); } URI uri = ModelLoaderUtils.resolvePackageLocation( @@ -152,14 +149,4 @@ private void ex client.execute(action, request).actionGet(); } - - private void 
writeDebugNotification(String modelId, String message) { - client.execute( - AuditMlNotificationAction.INSTANCE, - new AuditMlNotificationAction.Request(AuditMlNotificationAction.AuditType.INFERENCE, modelId, message, Level.INFO), - ActionListener.noop() - ); - - logger.debug(() -> format("[%s] %s", modelId, message)); - } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index f31f01b7c2aae..b0544806d52bd 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -141,6 +141,8 @@ static void importModel( try { final long relativeStartNanos = System.nanoTime(); + logAndWriteNotificationAtInfo(auditClient, modelId, "starting model import"); + modelImporter.doImport(); final long totalRuntimeNanos = System.nanoTime() - relativeStartNanos; diff --git a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java index a4d7245acba6f..1e10ea48d03db 100644 --- a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java +++ b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java @@ -32,6 +32,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class TransportLoadTrainedModelPackageTests extends ESTestCase { @@ -53,7 +54,9 @@ public void testSendsFinishedUploadNotification() { ); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); - verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + // 2 notifications- the start and finish messages + verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + // Only the last message is captured assertThat(notificationArg.getValue().getMessage(), CoreMatchers.containsString("finished model import after")); } @@ -145,8 +148,9 @@ private void assertNotificationAndOnFailure(Exception thrownException, Elasticse TransportLoadTrainedModelPackage.importModel(client, taskManager, createRequestWithWaiting(), uploader, listener, task); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); - verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); - assertThat(notificationArg.getValue().getMessage(), is(message)); + // 2 notifications- the starting message and the failure + verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + assertThat(notificationArg.getValue().getMessage(), is(message)); // the last message is captured var receivedException = (ElasticsearchStatusException) failureRef.get(); assertThat(receivedException.toString(), is(onFailureException.toString())); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java index d58e06a35dcaa..56b0436df2c90 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java @@ -21,13 +21,14 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; @ServerlessScope(Scope.INTERNAL) public class RestMlMemoryAction extends BaseRestHandler { public static final String NODE_ID = "nodeId"; - public static final String MASTER_TIMEOUT = "master_timeout"; + public static final String MASTER_TIMEOUT = REST_MASTER_TIMEOUT_PARAM; public static final String TIMEOUT = "timeout"; @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java index f60b8581e3769..6641aaf66a103 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -43,7 +44,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { SetUpgradeModeAction.Request request = new SetUpgradeModeAction.Request(restRequest.paramAsBoolean("enabled", false)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> 
client.execute(SetUpgradeModeAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java index d9ea3b006b7e0..d78a9edb50753 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -49,7 +50,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request.setForce(restRequest.paramAsBoolean(CloseJobAction.Request.FORCE.getPreferredName(), request.isForce())); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteDatafeedAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java index c216afef89fdb..903deb27d2dd9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java @@ -23,6 +23,7 @@ import java.util.List; import static 
org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -52,7 +53,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); } putDatafeedRequest.ackTimeout(restRequest.paramAsTime("timeout", putDatafeedRequest.ackTimeout())); - putDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putDatafeedRequest.masterNodeTimeout())); + putDatafeedRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(PutDatafeedAction.INSTANCE, putDatafeedRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java index f51f37715cdc4..c11f4ad367812 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -58,7 +59,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); } updateDatafeedRequest.ackTimeout(restRequest.paramAsTime("timeout", updateDatafeedRequest.ackTimeout())); - 
updateDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateDatafeedRequest.masterNodeTimeout())); + updateDatafeedRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateDatafeedAction.INSTANCE, updateDatafeedRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java index d26d0091e1acf..2c3f84401b912 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -44,7 +45,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { Request request = new Request(restRequest.param(Request.FILTER_ID.getPreferredName())); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteFilterAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java index db21a4278df24..29c63f6f60fcc 
100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestUpdateTrainedModelDeploymentAction extends BaseRestHandler { @@ -51,7 +52,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient XContentParser parser = restRequest.contentParser(); UpdateTrainedModelDeploymentAction.Request request = UpdateTrainedModelDeploymentAction.Request.parseRequest(modelId, parser); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateTrainedModelDeploymentAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java index 81cc9ab036bb9..659fb6ba2e271 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java @@ -26,6 +26,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -51,7 +52,7 @@ protected RestChannelConsumer 
prepareRequest(RestRequest restRequest, NodeClient DeleteJobAction.Request deleteJobRequest = new DeleteJobAction.Request(restRequest.param(Job.ID.getPreferredName())); deleteJobRequest.setForce(restRequest.paramAsBoolean(CloseJobAction.Request.FORCE.getPreferredName(), deleteJobRequest.isForce())); deleteJobRequest.ackTimeout(restRequest.paramAsTime("timeout", deleteJobRequest.ackTimeout())); - deleteJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteJobRequest.masterNodeTimeout())); + deleteJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); deleteJobRequest.setDeleteUserAnnotations(restRequest.paramAsBoolean("delete_user_annotations", false)); if (restRequest.paramAsBoolean("wait_for_completion", true)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java index 6add232cfecb8..a3cb1016756e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -47,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient XContentParser parser = restRequest.contentParser(); UpdateJobAction.Request updateJobRequest = UpdateJobAction.Request.parseRequest(jobId, parser); updateJobRequest.ackTimeout(restRequest.paramAsTime("timeout", updateJobRequest.ackTimeout())); - updateJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateJobRequest.masterNodeTimeout())); + 
updateJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateJobAction.INSTANCE, updateJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java index 7bff218114b71..b8ce60519189f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -50,7 +51,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient IndicesOptions indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); PutJobAction.Request putJobRequest = PutJobAction.Request.parseRequest(jobId, parser, indicesOptions); putJobRequest.ackTimeout(restRequest.paramAsTime("timeout", putJobRequest.ackTimeout())); - putJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putJobRequest.masterNodeTimeout())); + putJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(PutJobAction.INSTANCE, putJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java index 28271ec578fb0..39fe102ee08be 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java @@ -25,6 +25,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; @ServerlessScope(Scope.PUBLIC) @@ -44,7 +45,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { ResetJobAction.Request request = new ResetJobAction.Request(restRequest.param(Job.ID.getPreferredName())); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.setDeleteUserAnnotations(restRequest.paramAsBoolean("delete_user_annotations", false)); if (restRequest.paramAsBoolean("wait_for_completion", true)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java index 4478552a22a9e..356e7cdd49635 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Request.SNAPSHOT_ID; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -66,7 +67,7 @@ protected 
RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(RevertModelSnapshotAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index 9bfea90a28489..f57ed36540f39 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -32,6 +32,7 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; /** * {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked and @@ -112,7 +113,7 @@ protected PublishableHttpResource( final Map parameters = Maps.newMapWithExpectedSize(baseParameters.size() + 1); parameters.putAll(baseParameters); - parameters.put("master_timeout", masterTimeout.toString()); + parameters.put(REST_MASTER_TIMEOUT_PARAM, masterTimeout.toString()); this.defaultParameters = Collections.unmodifiableMap(parameters); } else { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java index b72891708e780..cbb225462b858 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java @@ -33,6 +33,7 @@ import static java.util.stream.Collectors.joining; import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockBooleanActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockPublishResultActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.whenPerformRequestAsyncWith; @@ -228,9 +229,9 @@ protected void assertParameters(final PublishableHttpResource resource) { final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { - assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); + assertThat(parameters.remove(REST_MASTER_TIMEOUT_PARAM), is(masterTimeout.toString())); } else { - assertFalse(parameters.containsKey("master_timeout")); + assertFalse(parameters.containsKey(REST_MASTER_TIMEOUT_PARAM)); } assertThat(parameters.remove("filter_path"), is("$NONE")); @@ -241,9 +242,9 @@ protected void assertVersionParameters(final PublishableHttpResource resource) { final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { - assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); + 
assertThat(parameters.remove(REST_MASTER_TIMEOUT_PARAM), is(masterTimeout.toString())); } else { - assertFalse(parameters.containsKey("master_timeout")); + assertFalse(parameters.containsKey(REST_MASTER_TIMEOUT_PARAM)); } assertThat(parameters.remove("filter_path"), is("*.version")); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 64237886d5f7b..fec656e76cc0d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -49,6 +49,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -764,7 +765,10 @@ private void assertMasterTimeoutSet(final List resources if (timeout != null) { for (final HttpResource resource : resources) { if (resource instanceof PublishableHttpResource) { - assertEquals(timeout.getStringRep(), ((PublishableHttpResource) resource).getDefaultParameters().get("master_timeout")); + assertEquals( + timeout.getStringRep(), + ((PublishableHttpResource) resource).getDefaultParameters().get(REST_MASTER_TIMEOUT_PARAM) + ); } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java index e4ea3c1521d22..f93223d0e3e49 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java +++ 
b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java @@ -173,6 +173,7 @@ public long getTotalSamples() { return totalSamples; } + @UpdateForV9 // change casing from Camel Case to Snake Case (requires updates in Kibana as well) @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java index e1f8ac16f15ec..c5fcde1f7ec94 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetStatusAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { GetStatusAction.Request request = new GetStatusAction.Request(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.waitForResourcesCreated(restRequest.paramAsBoolean("wait_for_resources_created", false)); return channel -> client.execute( GetStatusAction.INSTANCE, diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/Types.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/Types.java index a19f4c634f77c..00f776db29fb6 100644 --- 
a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/Types.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/Types.java @@ -53,7 +53,13 @@ private static DataType getType(DataTypeRegistry typeRegistry, Map loadMapping(DataTypeRegistry registry, String private static Map loadMapping(DataTypeRegistry registry, InputStream stream, Boolean ordered) { boolean order = ordered != null ? ordered.booleanValue() : randomBoolean(); try (InputStream in = stream) { - return Types.fromEs(registry, XContentHelper.convertToMap(JsonXContent.jsonXContent, in, order)); + Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, in, order); + return Types.fromEs(registry, map); } catch (IOException ex) { throw new RuntimeException(ex); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index e3b631ba69c8a..c50fe50db8b40 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexService; @@ -141,7 +142,7 @@ public void testRetryPointInTime() throws Exception { final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indexName).indicesOptions( IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED 
).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertThat(resp.pointInTimeId(), equalTo(pitId)); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index a40f21c0de08d..a21e3e6beabce 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -484,7 +485,7 @@ public void onFailure(Exception e) { }); } - private static ActionListener closingPitBefore(Client client, String pointInTimeId, ActionListener listener) { + private static ActionListener closingPitBefore(Client client, BytesReference pointInTimeId, ActionListener listener) { return new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -498,7 +499,7 @@ public void onFailure(Exception e) { }; } - private static void closePit(Client client, String 
pointInTimeId, Runnable onCompletion) { + private static void closePit(Client client, BytesReference pointInTimeId, Runnable onCompletion) { client.execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId), new ActionListener<>() { @Override public void onResponse(ClosePointInTimeResponse response) { @@ -522,14 +523,14 @@ public void onFailure(Exception e) { * The maintenance task, once it has opened its PIT and started running so that it has all the state it needs to do its job. */ private class RunningPeriodicMaintenanceTask implements Runnable { - private final String pointInTimeId; + private final BytesReference pointInTimeId; private final RefCountingListener listeners; private final Instant expirationTime; private final Map> existingSnapshots; private final Set existingRepositories; RunningPeriodicMaintenanceTask( - String pointInTimeId, + BytesReference pointInTimeId, ActionListener listener, Instant expirationTime, Map> existingSnapshots, diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java index 9dec9fb86e26c..4fcf87bd8bf1f 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.searchablesnapshots.rest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +18,7 @@ import java.util.List; import static 
org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestMountSearchableSnapshotAction extends BaseRestHandler { @Override @@ -36,7 +36,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli MountSearchableSnapshotRequest mountSearchableSnapshotRequest = MountSearchableSnapshotRequest.PARSER.apply( request.contentParser(), request - ).masterNodeTimeout(request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT)); + ).masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( MountSearchableSnapshotAction.INSTANCE, mountSearchableSnapshotRequest, diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index 3d644103dfb6f..2f3ece56b3281 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -320,14 +320,16 @@ public void testCrossClusterSearchWithApiKey() throws Exception { ) ); - // Check that authentication fails if we use a non-existent cross cluster access API key + // Check that authentication fails if we use a non-existent cross cluster access API key (when skip_unavailable=false) updateClusterSettings( randomBoolean() ? 
Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 3a7bc49340333..931d3b94669fb 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -499,12 +499,18 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { configureRemoteCluster(); populateData(); + final boolean skipUnavailable = randomBoolean(); + // avoids getting 404 errors updateClusterSettings( randomBoolean() - ? Settings.builder().put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)).build() + ? 
Settings.builder() + .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) + .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); @@ -520,8 +526,14 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { var q2 = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; performRequestWithRemoteSearchUser(esqlRequest(q2)); }); - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(error.getMessage(), containsString("unable to find apikey")); + + if (skipUnavailable == false) { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(error.getMessage(), containsString("unable to find apikey")); + } else { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(500)); + assertThat(error.getMessage(), containsString("Unable to connect to [invalid_remote]")); + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index a5ffeacf28112..793313e238651 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -47,6 +47,7 @@ import 
org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; @@ -82,6 +83,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class RemoteClusterSecurityFcActionAuthorizationIT extends ESRestTestCase { @@ -176,7 +178,9 @@ public void testIndicesPrivilegesAreEnforcedForCcrRestoreSessionActions() throws } // Simulate QC behaviours by directly connecting to the FC using a transport service - try (MockTransportService service = startTransport("node", threadPool, (String) crossClusterApiKeyMap.get("encoded"))) { + final String apiKey = (String) crossClusterApiKeyMap.get("encoded"); + final boolean skipUnavailable = randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final List remoteConnectionInfos = remoteClusterService.getRemoteConnectionInfos().toList(); assertThat(remoteConnectionInfos, hasSize(1)); @@ -328,28 +332,35 @@ public void testRestApiKeyIsNotAllowedOnRemoteClusterPort() throws IOException { final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest); assertOK(createApiKeyResponse); final Map apiKeyMap = responseAsMap(createApiKeyResponse); - try (MockTransportService service = startTransport("node", threadPool, (String) apiKeyMap.get("encoded"))) { + final String apiKey = (String) apiKeyMap.get("encoded"); + final boolean skipUnavailable 
= randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( "my_remote_cluster", EsExecutors.DIRECT_EXECUTOR_SERVICE, RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE ); - - final ElasticsearchSecurityException e = expectThrows( - ElasticsearchSecurityException.class, + final Exception e = expectThrows( + Exception.class, () -> executeRemote( remoteClusterClient, RemoteClusterNodesAction.REMOTE_TYPE, RemoteClusterNodesAction.Request.REMOTE_CLUSTER_SERVER_NODES ) ); - assertThat( - e.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" - ) - ); + if (skipUnavailable) { + assertThat(e, instanceOf(ConnectTransportException.class)); + assertThat(e.getMessage(), containsString("Unable to connect to [my_remote_cluster]")); + } else { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat( + e.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" + ) + ); + } } } @@ -392,12 +403,14 @@ public void testUpdateCrossClusterApiKey() throws Exception { final FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().indices("index").fields("name"); // Perform cross-cluster requests + boolean skipUnavailable = randomBoolean(); try ( MockTransportService service = startTransport( "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo) + Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo), + skipUnavailable ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ 
-508,7 +521,8 @@ public void testMalformedShardLevelActionIsRejected() throws Exception { "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)) + Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)), + randomBoolean() ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -552,15 +566,21 @@ private static CrossClusterAccessSubjectInfo buildCrossClusterAccessSubjectInfo( ); } - private static MockTransportService startTransport(final String nodeName, final ThreadPool threadPool, String encodedApiKey) { - return startTransport(nodeName, threadPool, encodedApiKey, Map.of()); + private static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + String encodedApiKey, + boolean skipUnavailable + ) { + return startTransport(nodeName, threadPool, encodedApiKey, Map.of(), skipUnavailable); } private static MockTransportService startTransport( final String nodeName, final ThreadPool threadPool, String encodedApiKey, - Map subjectInfoLookup + Map subjectInfoLookup, + boolean skipUnavailable ) { final String remoteClusterServerEndpoint = testCluster.getRemoteClusterServerEndpoint(0); @@ -573,9 +593,11 @@ private static MockTransportService startTransport( builder.setSecureSettings(secureSettings); if (randomBoolean()) { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.seeds", remoteClusterServerEndpoint); } else { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.proxy_address", remoteClusterServerEndpoint); } diff --git 
a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java index 29afda08500ca..c791752e76de0 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java @@ -105,9 +105,11 @@ protected void configureRemoteCluster(boolean isProxyMode) throws Exception { final Settings.Builder builder = Settings.builder(); if (isProxyMode) { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .put("cluster.remote.my_remote_cluster.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } else { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .putList("cluster.remote.my_remote_cluster.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } updateClusterSettings(builder.build()); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java index d1e78d4f3ad39..c6bb6e10f0537 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java @@ -7,6 +7,7 
@@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.util.EntityUtils; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -331,66 +332,108 @@ public void testCrossClusterSearch() throws Exception { ) ); - // Check that authentication fails if we use a non-existent API key + // Check that authentication fails if we use a non-existent API key (when skip_unavailable=false) + boolean skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception4 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) - ); - assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception4.getMessage(), containsString("unable to find apikey")); + if (skipUnavailable) { + /* + when skip_unavailable=true, response should be something like: + {"took":1,"timed_out":false,"num_reduce_phases":0,"_shards":{"total":0,"successful":0,"skipped":0,"failed":0}, + "_clusters":{"total":1,"successful":0,"skipped":1,"running":0,"partial":0,"failed":0, + "details":{"invalid_remote":{"status":"skipped","indices":"index1","timed_out":false, + "failures":[{"shard":-1,"index":null,"reason":{"type":"connect_transport_exception", + "reason":"Unable to connect to [invalid_remote]"}}]}}}, + 
"hits":{"total":{"value":0,"relation":"eq"},"max_score":null,"hits":[]}} + */ + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception4 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) + ); + assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception4.getMessage(), containsString("unable to find apikey")); + } - // check that REST API key is not supported by cross cluster access + // check that REST API key is not supported by cross cluster access (when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? 
Settings.builder() .put("cluster.remote.wrong_api_key_type.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.wrong_api_key_type.mode", "proxy") + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.wrong_api_key_type.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception5 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) - ); - assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat( - exception5.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" - + REST_API_KEY_MAP_REF.get().get("id") - + "] has type [rest]" - ) - ); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception5 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) + ); + assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat( + exception5.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + + REST_API_KEY_MAP_REF.get().get("id") + + "] has type [rest]" + ) + ); + } - // Check invalid cross-cluster API key length is 
rejected + // Check invalid cross-cluster API key length is rejected (and gets security error when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_secret_length.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_secret_length.mode", "proxy") + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_secret_length.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception6 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) - ); - assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception6 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) + ); + assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + } } } diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index d105b616c57f1..1be8f543ebcb3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -1303,7 +1304,7 @@ public void testReaderId() throws Exception { } refresh(); - String pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); + BytesReference pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); SearchResponse response = null; try { for (int from = 0; from < numVisible; from++) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 591b20bd82f47..849f5d1a48c5e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.internal.Requests; +import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -1157,7 +1158,7 @@ public void testScroll() throws Exception { } } - static String openPointInTime(String userName, TimeValue keepAlive, String... indices) { + static BytesReference openPointInTime(String userName, TimeValue keepAlive, String... indices) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(userName, USERS_PASSWD)) @@ -1178,7 +1179,7 @@ public void testPointInTimeId() throws Exception { } refresh("test"); - String pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); + BytesReference pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); try { for (int from = 0; from < numDocs; from++) { assertResponse( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 1a5b1ab39cd83..fa9b53c5af935 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -14,7 +14,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; @@ -415,7 +414,6 @@ private void createApiKeyAndIndexIt( final Instant expiration = getApiKeyExpiration(created, request.getExpiration()); final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); assert 
ApiKey.Type.CROSS_CLUSTER != request.getType() || API_KEY_SECRET_LENGTH == apiKey.length(); - final Version version = clusterService.state().nodes().getMinNodeVersion(); computeHashForApiKey(apiKey, listener.delegateFailure((l, apiKeyHashChars) -> { try ( @@ -428,7 +426,7 @@ private void createApiKeyAndIndexIt( expiration, request.getRoleDescriptors(), request.getType(), - version, + ApiKey.CURRENT_API_KEY_VERSION, request.getMetadata() ) ) { @@ -712,7 +710,7 @@ static XContentBuilder newDocument( Instant expiration, List keyRoleDescriptors, ApiKey.Type type, - Version version, + ApiKey.Version version, @Nullable Map metadata ) throws IOException { final XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -727,7 +725,7 @@ static XContentBuilder newDocument( addRoleDescriptors(builder, keyRoleDescriptors); addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", name).field("version", version.id).field("metadata_flattened", metadata); + builder.field("name", name).field("version", version.version()).field("metadata_flattened", metadata); addCreator(builder, authentication); return builder.endObject(); @@ -742,7 +740,7 @@ static XContentBuilder newDocument( static XContentBuilder maybeBuildUpdatedDocument( final String apiKeyId, final ApiKeyDoc currentApiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors, @@ -779,7 +777,7 @@ static XContentBuilder maybeBuildUpdatedDocument( addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.id); + builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.version()); assert currentApiKeyDoc.metadataFlattened == null || MetadataUtils.containsReservedMetadata( @@ -807,12 +805,12 @@ static XContentBuilder maybeBuildUpdatedDocument( private static boolean 
isNoop( final String apiKeyId, final ApiKeyDoc apiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors ) throws IOException { - if (apiKeyDoc.version != targetDocVersion.id) { + if (apiKeyDoc.version != targetDocVersion.version()) { return false; } @@ -1468,8 +1466,8 @@ private IndexRequest maybeBuildIndexRequest( currentVersionedDoc.primaryTerm() ); } - final var targetDocVersion = clusterService.state().nodes().getMinNodeVersion(); - final var currentDocVersion = Version.fromId(currentVersionedDoc.doc().version); + final var targetDocVersion = ApiKey.CURRENT_API_KEY_VERSION; + final var currentDocVersion = new ApiKey.Version(currentVersionedDoc.doc().version); assert currentDocVersion.onOrBefore(targetDocVersion) : "current API key doc version must be on or before target version"; if (logger.isDebugEnabled() && currentDocVersion.before(targetDocVersion)) { logger.debug( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 773573d02e45a..95574c317495a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -214,13 +214,9 @@ public void removeStateListener(BiConsumer listener) { * Get the minimum security index mapping version in the cluster */ private SystemIndexDescriptor.MappingsVersion getMinSecurityIndexMappingVersion(ClusterState clusterState) { - var minClusterVersion = clusterState.getMinSystemIndexMappingVersions().get(systemIndexDescriptor.getPrimaryIndex()); - // Can be null in mixed clusters. 
This indicates that the cluster state and index needs to be updated with the latest mapping - // version from the index descriptor - if (minClusterVersion == null) { - return systemIndexDescriptor.getMappingsVersion(); - } - return minClusterVersion; + SystemIndexDescriptor.MappingsVersion mappingsVersion = clusterState.getMinSystemIndexMappingVersions() + .get(systemIndexDescriptor.getPrimaryIndex()); + return mappingsVersion == null ? new SystemIndexDescriptor.MappingsVersion(1, 0) : mappingsVersion; } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0cb7a270099ad..b3ec3ef117c3e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -67,7 +67,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -1161,7 +1160,7 @@ private static Tuple, Map> newApiKeyDocument Instant.now().plus(expiry), keyRoles, type, - Version.CURRENT, + ApiKey.CURRENT_API_KEY_VERSION, metadataMap ); Map keyMap = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2(); @@ -2368,7 +2367,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException { final long now = randomMillisUpToYear9999(); when(clock.instant()).thenReturn(Instant.ofEpochMilli(now)); final Map oldMetadata = ApiKeyTests.randomMetadata(); - final Version oldVersion = VersionUtils.randomVersion(random()); + final ApiKey.Version oldVersion = new 
ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version())); final ApiKeyDoc oldApiKeyDoc = ApiKeyDoc.fromXContent( XContentHelper.createParser( XContentParserConfiguration.EMPTY, @@ -2419,8 +2418,8 @@ public void testMaybeBuildUpdatedDocument() throws IOException { final Map newMetadata = changeMetadata ? randomValueOtherThanMany(md -> md == null || md.equals(oldMetadata), ApiKeyTests::randomMetadata) : (randomBoolean() ? oldMetadata : null); - final Version newVersion = changeVersion - ? randomValueOtherThan(oldVersion, () -> VersionUtils.randomVersion(random())) + final ApiKey.Version newVersion = changeVersion + ? randomValueOtherThan(oldVersion, ApiKeyServiceTests::randomApiKeyVersion) : oldVersion; final Authentication newAuthentication = changeCreator ? randomValueOtherThanMany( @@ -2468,7 +2467,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException { assertEquals(oldApiKeyDoc.hash, updatedApiKeyDoc.hash); assertEquals(oldApiKeyDoc.creationTime, updatedApiKeyDoc.creationTime); assertEquals(oldApiKeyDoc.invalidated, updatedApiKeyDoc.invalidated); - assertEquals(newVersion.id, updatedApiKeyDoc.version); + assertEquals(newVersion.version(), updatedApiKeyDoc.version); final var actualUserRoles = service.parseRoleDescriptorsBytes( "", updatedApiKeyDoc.limitedByRoleDescriptorsBytes, @@ -2991,7 +2990,7 @@ public static Authentication createApiKeyAuthentication( Instant.now().plus(Duration.ofSeconds(3600)), keyRoles, ApiKey.Type.REST, - Version.CURRENT, + ApiKey.CURRENT_API_KEY_VERSION, randomBoolean() ? 
null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)) ); final ApiKeyDoc apiKeyDoc = ApiKeyDoc.fromXContent( @@ -3207,4 +3206,8 @@ private static Authenticator.Context getAuthenticatorContext(ThreadContext threa mock(Realms.class) ); } + + private static ApiKey.Version randomApiKeyVersion() { + return new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version())); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 861b21403b2b0..ae33c4e5e31e8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -94,6 +94,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -942,7 +944,7 @@ public void testUserWithNoRolesOpenPointInTimeWithRemoteIndices() { } public void testUserWithNoRolesCanClosePointInTime() { - final ClosePointInTimeRequest closePointInTimeRequest = new ClosePointInTimeRequest(randomAlphaOfLength(8)); + final ClosePointInTimeRequest closePointInTimeRequest = new ClosePointInTimeRequest(new BytesArray(randomAlphaOfLength(8))); final Authentication authentication = createAuthentication(new User("test user")); mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); @@ -3641,7 +3643,7 @@ private static class TestSearchPhaseResult extends SearchPhaseResult { } 
} - private static String createEncodedPIT(Index index) { + private static BytesReference createEncodedPIT(Index index) { DiscoveryNode node1 = DiscoveryNodeUtils.create("node_1"); TestSearchPhaseResult testSearchPhaseResult1 = new TestSearchPhaseResult(new ShardSearchContextId("a", 1), node1); testSearchPhaseResult1.setSearchShardTarget( diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java index b97ea82d3d73f..44b7461fe70cd 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java @@ -14,6 +14,8 @@ import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestDeleteShutdownNodeAction extends BaseRestHandler { @Override @@ -35,7 +37,7 @@ public boolean canTripCircuitBreaker() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { String nodeId = request.param("nodeId"); final var parsedRequest = new DeleteShutdownNodeAction.Request(nodeId); - parsedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", parsedRequest.masterNodeTimeout())); + parsedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(DeleteShutdownNodeAction.INSTANCE, parsedRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java index a1a987b57b233..c2efaa6e1c11b 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java +++ 
b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java @@ -16,6 +16,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestPutShutdownNodeAction extends BaseRestHandler { @Override @@ -38,7 +40,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String nodeId = request.param("nodeId"); try (XContentParser parser = request.contentParser()) { PutShutdownNodeAction.Request parsedRequest = PutShutdownNodeAction.Request.parseRequest(nodeId, parser); - parsedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", parsedRequest.masterNodeTimeout())); + parsedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(PutShutdownNodeAction.INSTANCE, parsedRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java index 5d9a692a0876c..e67d7dd2e2fe9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestDeleteSnapshotLifecycleAction extends BaseRestHandler { @@ -37,7 +38,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String lifecycleId = request.param("name"); DeleteSnapshotLifecycleAction.Request req = new DeleteSnapshotLifecycleAction.Request(lifecycleId); req.ackTimeout(request.paramAsTime("timeout", 
req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(DeleteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java index 622021ca87a89..7de23c04d8e91 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java @@ -19,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestExecuteSnapshotLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String snapLifecycleId = request.param("name"); ExecuteSnapshotLifecycleAction.Request req = new ExecuteSnapshotLifecycleAction.Request(snapLifecycleId); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(ExecuteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java index 2175839886022..84943e361b94b 100644 --- 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestExecuteSnapshotRetentionAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { ExecuteSnapshotRetentionAction.Request req = new ExecuteSnapshotRetentionAction.Request(); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(ExecuteSnapshotRetentionAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java index 278e15a9f9b36..5e4ea3002e614 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSLMStatusAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { AcknowledgedRequest.Plain request = new AcknowledgedRequest.Plain(); 
request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(GetSLMStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java index c6609d85ca1c3..3818947488bfe 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSnapshotLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String[] lifecycleNames = Strings.splitStringByCommaToArray(request.param("name")); GetSnapshotLifecycleAction.Request req = new GetSnapshotLifecycleAction.Request(lifecycleNames); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(GetSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java index 5fea0905f04bd..3a177dfa467be 100644 --- 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSnapshotLifecycleStatsAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { AcknowledgedRequest.Plain req = new AcknowledgedRequest.Plain(); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(GetSnapshotLifecycleStatsAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java index 968f043f61bd6..8066ab2575385 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPutSnapshotLifecycleAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli try (XContentParser parser = request.contentParser()) { PutSnapshotLifecycleAction.Request req = 
PutSnapshotLifecycleAction.Request.parseRequest(snapLifecycleName, parser); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(PutSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java index 4db876149e784..ab41973f640ac 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestStartSLMAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StartSLMAction.Request request = new StartSLMAction.Request(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(StartSLMAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java index 9131dcc15cf78..58c96a64195d5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java +++ 
b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestStopSLMAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StopSLMAction.Request request = new StopSLMAction.Request(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(StopSLMAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 1d7a3cdd836ff..9cf60ec3bb2e4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -160,7 +160,7 @@ private void searchWithPointInTime(SearchRequest search, ActionListener { - String pitId = openPointInTimeResponse.getPointInTimeId(); + BytesReference pitId = openPointInTimeResponse.getPointInTimeId(); search.indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); search.indices(Strings.EMPTY_ARRAY); search.source().pointInTimeBuilder(new PointInTimeBuilder(pitId)); @@ -176,14 +176,14 @@ private void searchWithPointInTime(SearchRequest search, ActionListener listener) { + private static void closePointInTimeAfterError(Client client, BytesReference pointInTimeId, Exception e, ActionListener listener) { closePointInTime(client, 
pointInTimeId, wrap(r -> listener.onFailure(e), closeError -> { e.addSuppressed(closeError); listener.onFailure(e); })); } - public static void closePointInTime(Client client, String pointInTimeId, ActionListener listener) { + public static void closePointInTime(Client client, BytesReference pointInTimeId, ActionListener listener) { if (pointInTimeId != null) { // request should not be made with the parent task assigned because the parent task might already be canceled client = client instanceof ParentTaskAssigningClient wrapperClient ? wrapperClient.unwrap() : client; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java index 86cb54a33bb5a..10d6b04d7505c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.tasks.TaskCancelHelper; import org.elasticsearch.tasks.TaskCancelledException; @@ -171,7 +173,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti ClusterService mockClusterService = mockClusterService(nodeId); String[] indices = new String[] { "endgame" }; - String pitId = randomAlphaOfLength(10); + BytesReference pitId = new BytesArray(randomAlphaOfLength(10)); // Emulation of field capabilities FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); diff --git 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml new file mode 100644 index 0000000000000..891e02bc2dcf5 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml @@ -0,0 +1,135 @@ +setup: + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + terms_field: + type: keyword + date_field: + type: date + int_field: + type : integer + double_field: + type : double + string_field: + type: keyword + histogram_field: + type: histogram + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: "1" + - terms_field: foo + date_field: 2024-01-02 + - index: + _index: test_1 + _id: "2" + - terms_field: foo + date_field: 2024-01-02 + - index: + _index: test_1 + _id: "3" + - terms_field: bar + date_field: 2024-01-01 + +--- +"Basic test": + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + aggs: + the_terms: + terms: + field: terms_field + "order": + "_key": "asc" + aggs: + boxplot_agg: + boxplot: + field: double_field + t_test_agg: + t_test: + a: + field: double_field + b: + field: int_field + type: paired + string_stats_agg: + string_stats: + field: string_field + + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { aggregations.the_terms.buckets.0.key: bar} + - match: { aggregations.the_terms.buckets.0.doc_count: 1} + - exists: aggregations.the_terms.buckets.0.boxplot_agg.min + - exists: aggregations.the_terms.buckets.0.boxplot_agg.max + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q3 + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q1 + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q2 + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q3 + - match: { aggregations.the_terms.buckets.0.t_test_agg.value: null } + - match: { 
aggregations.the_terms.buckets.0.string_stats_agg.count: 0 } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.min_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.max_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.avg_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.entropy: 0 } + - match: { aggregations.the_terms.buckets.1.key: foo} + - match: { aggregations.the_terms.buckets.1.doc_count: 2} + - exists: aggregations.the_terms.buckets.1.boxplot_agg.min + - exists: aggregations.the_terms.buckets.1.boxplot_agg.max + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q3 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q1 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q2 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q3 + - match: { aggregations.the_terms.buckets.1.t_test_agg.value: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.count: 0 } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.min_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.max_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.avg_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.entropy: 0 } + +--- +"Rate test": + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + aggs: + the_date_hist: + date_histogram: + field: date_field + calendar_interval: day + format: yyyy-MM-dd + aggs: + rate_agg: + rate: + field: double_field + rate_hist_agg: + rate: + field: histogram_field + + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { aggregations.the_date_hist.buckets.0.key_as_string: 2024-01-01 } + - match: { aggregations.the_date_hist.buckets.0.doc_count: 1 } + - match: { aggregations.the_date_hist.buckets.0.rate_agg.value: 0.0 } + - match: { aggregations.the_date_hist.buckets.0.rate_hist_agg.value: 0.0 } + - match: { 
aggregations.the_date_hist.buckets.1.key_as_string: 2024-01-02 } + - match: { aggregations.the_date_hist.buckets.1.doc_count: 2 } + - match: { aggregations.the_date_hist.buckets.1.rate_agg.value: 0.0 } + - match: { aggregations.the_date_hist.buckets.1.rate_hist_agg.value: 0.0 } + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml index 30b81860f014f..c09bc17ab9a5c 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml @@ -1,7 +1,7 @@ setup: - requires: - cluster_features: ["gte_v8.11.0"] - reason: "ESQL is available in 8.11+" + cluster_features: ["esql.metrics_counter_fields"] + reason: "require metrics counter fields" test_runner_features: allowed_warnings_regex - do: indices.create: @@ -38,7 +38,7 @@ setup: type: long time_series_metric: counter rx: - type: long + type: integer time_series_metric: counter - do: bulk: @@ -112,7 +112,6 @@ load everything: reason: "_source is available in 8.13+" - do: allowed_warnings_regex: - - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: @@ -126,9 +125,9 @@ load everything: - match: {columns.2.name: "k8s.pod.name"} - match: {columns.2.type: "keyword"} - match: {columns.3.name: "k8s.pod.network.rx"} - - match: {columns.3.type: "unsupported"} + - match: {columns.3.type: "counter_integer"} - match: {columns.4.name: "k8s.pod.network.tx"} - - match: {columns.4.type: "unsupported"} + - match: {columns.4.type: "counter_long"} - match: {columns.5.name: "k8s.pod.uid"} - match: {columns.5.type: "keyword"} - match: {columns.6.name: "metricset"} @@ -139,7 +138,6 @@ load everything: load a document: - do: allowed_warnings_regex: - - "Field \\[.*\\] cannot be retrieved, it is unsupported or not 
indexed; returning null" - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: @@ -151,25 +149,60 @@ load a document: - match: {values.0.0: "2021-04-28T18:50:23.142Z"} - match: {values.0.1: "10.10.55.3"} - match: {values.0.2: "dog"} - - match: {values.0.3: null } - - match: {values.0.4: null } + - match: {values.0.3: 530600088 } + - match: {values.0.4: 1434577921 } - match: {values.0.5: "df3145b3-0563-4d3b-a0f7-897eb2876ea9"} - match: {values.0.6: "pod"} --- -filter on counter: +filter on counter without cast: - do: - catch: /Cannot use field \[k8s.pod.network.tx\] with unsupported type \[counter\]/ + catch: bad_request esql.query: body: query: 'from test | where k8s.pod.network.tx == 1434577921' version: 2024.04.01 +--- +cast counter then filter: + - do: + esql.query: + body: + query: 'from test | where k8s.pod.network.tx::long == 2005177954 and k8s.pod.network.rx::integer == 801479970 | sort @timestamp | limit 10' + version: 2024.04.01 + - length: {values: 1} + - length: {values.0: 7} + - match: {values.0.0: "2021-04-28T18:50:24.467Z"} + - match: {values.0.1: "10.10.55.1"} + - match: {values.0.2: "cat"} + - match: {values.0.3: 801479970 } + - match: {values.0.4: 2005177954 } + - match: {values.0.5: "947e4ced-1786-4e53-9e0c-5c447e959507"} + - match: {values.0.6: "pod"} + +--- +sort on counter without cast: + - do: + catch: bad_request + esql.query: + body: + query: 'from test | KEEP k8s.pod.network.tx | sort k8s.pod.network.tx | limit 1' + version: 2024.04.01 + +--- +cast then sort on counter: + - do: + esql.query: + body: + query: 'from test | KEEP k8s.pod.network.tx | EVAL tx=to_long(k8s.pod.network.tx) | sort tx | limit 1' + version: 2024.04.01 + - length: {values: 1} + - match: {values.0.0: 1434521831 } + --- from doc with aggregate_metric_double: - do: allowed_warnings_regex: - - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: @@ 
-201,7 +234,6 @@ stats on aggregate_metric_double: from index pattern unsupported counter: - do: allowed_warnings_regex: - - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" - "No limit defined, adding default limit of \\[.*\\]" esql.query: body: @@ -219,7 +251,7 @@ from index pattern unsupported counter: - match: {columns.4.name: "k8s.pod.name"} - match: {columns.4.type: "keyword"} - match: {columns.5.name: "k8s.pod.network.rx"} - - match: {columns.5.type: "unsupported"} + - match: {columns.5.type: "counter_integer"} - match: {columns.6.name: "k8s.pod.network.tx"} - match: {columns.6.type: "unsupported"} - match: {columns.7.name: "k8s.pod.uid"} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 66c618bc07c46..f77148de8d4a2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -29,6 +29,7 @@ import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; @@ -450,7 +451,7 @@ private void closePointInTime(String name) { return; } - String oldPit = pit.getEncodedId(); + BytesReference oldPit = pit.getEncodedId(); ClosePointInTimeRequest closePitRequest = new ClosePointInTimeRequest(oldPit); ClientHelper.executeWithHeadersAsync( @@ -473,7 +474,8 @@ private void injectPointInTimeIfNeeded( ActionListener> listener ) { 
SearchRequest searchRequest = namedSearchRequest.v2(); - if (disablePit || searchRequest.indices().length == 0) { + // We explicitly disable PIT in the presence of remote clusters in the source due to huge PIT handles causing performance problems. + if (disablePit || searchRequest.indices().length == 0 || transformConfig.getSource().requiresRemoteCluster()) { listener.onResponse(namedSearchRequest); return; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 9173e75c4737a..062c951f67c96 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -171,7 +173,7 @@ public void testPitInjection() throws InterruptedException { ); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -184,15 +186,15 @@ public void testPitInjection() throws InterruptedException { assertEquals(0L, client.getPitContextCounter()); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", 
response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id++"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+++"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -201,15 +203,15 @@ public void testPitInjection() throws InterruptedException { assertEquals(0L, client.getPitContextCounter()); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id++"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+++"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -306,11 +308,6 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { public void testDisablePit() throws InterruptedException { TransformConfig.Builder configBuilder = new TransformConfig.Builder(TransformConfigTests.randomTransformConfig()); - if (randomBoolean()) { - // TransformConfigTests.randomTransformConfig never produces remote indices in the source. - // We need to explicitly set the remote index here for coverage. 
- configBuilder.setSource(new SourceConfig("remote-cluster:remote-index")); - } TransformConfig config = configBuilder.build(); boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; @@ -357,7 +354,7 @@ public void testDisablePit() throws InterruptedException { this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { if (pitEnabled) { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); } else { assertNull(response.pointInTimeId()); } @@ -370,19 +367,83 @@ public void testDisablePit() throws InterruptedException { if (pitEnabled) { assertNull(response.pointInTimeId()); } else { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); } }); } } + public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedException { + TransformConfig config = new TransformConfig.Builder(TransformConfigTests.randomTransformConfig()) + // Remote index is configured within source + .setSource(new SourceConfig("remote-cluster:remote-index")) + .build(); + boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; + + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); + MockClientTransformIndexer indexer = new MockClientTransformIndexer( + mock(ThreadPool.class), + mock(ClusterService.class), + mock(IndexNameExpressionResolver.class), + mock(TransformExtension.class), + new TransformServices( + mock(IndexBasedTransformConfigManager.class), + mock(TransformCheckpointService.class), + mock(TransformAuditor.class), + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO), + mock(TransformNode.class) + ), + mock(CheckpointProvider.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + new ParentTaskAssigningClient(client, new 
TaskId("dummy-node:123456")), + mock(TransformIndexerStats.class), + config, + null, + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndexConstants.LATEST_INDEX_NAME), + mock(TransformContext.class), + false + ); + + // Because remote index is configured within source, we expect PIT *not* being used regardless the transform settings + this.assertAsync( + listener -> indexer.doNextSearch(0, listener), + response -> assertNull(response.pointInTimeId()) + ); + + // reverse the setting + indexer.applyNewSettings(new SettingsConfig.Builder().setUsePit(pitEnabled == false).build()); + + // Because remote index is configured within source, we expect PIT *not* being used regardless the transform settings + this.assertAsync( + listener -> indexer.doNextSearch(0, listener), + response -> assertNull(response.pointInTimeId()) + ); + } + } + public void testHandlePitIndexNotFound() throws InterruptedException { // simulate a deleted index due to ILM try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("deleted-index").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_on_deleted_index")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("the_pit_id_on_deleted_index"))) ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); this.assertAsync(listener -> indexer.doSearch(namedSearchRequest, listener), response -> { @@ -396,7 +457,7 @@ public void 
testHandlePitIndexNotFound() throws InterruptedException { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("essential-deleted-index").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_essential-deleted-index")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("the_pit_id_essential-deleted-index"))) ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); indexer.doSearch(namedSearchRequest, ActionListener.wrap(r -> fail("expected a failure, got response"), e -> { @@ -477,7 +538,7 @@ protected void if (request instanceof OpenPointInTimeRequest) { if (pitSupported) { pitContextCounter.incrementAndGet(); - OpenPointInTimeResponse response = new OpenPointInTimeResponse("the_pit_id"); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id")); listener.onResponse((Response) response); } else { listener.onFailure(new ActionNotFoundTransportException("_pit")); @@ -492,13 +553,13 @@ protected void } else if (request instanceof SearchRequest searchRequest) { // if pit is used and deleted-index is given throw index not found if (searchRequest.pointInTimeBuilder() != null - && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_on_deleted_index")) { + && searchRequest.pointInTimeBuilder().getEncodedId().equals(new BytesArray("the_pit_id_on_deleted_index"))) { listener.onFailure(new IndexNotFoundException("deleted-index")); return; } if ((searchRequest.pointInTimeBuilder() != null - && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_essential-deleted-index")) + && searchRequest.pointInTimeBuilder().getEncodedId().equals(new BytesArray("the_pit_id_essential-deleted-index"))) || (searchRequest.indices().length > 0 && 
searchRequest.indices()[0].equals("essential-deleted-index"))) { listener.onFailure(new IndexNotFoundException("essential-deleted-index")); return; @@ -506,7 +567,7 @@ protected void // throw search context missing for the 4th run if (searchRequest.pointInTimeBuilder() != null - && "the_pit_id+++".equals(searchRequest.pointInTimeBuilder().getEncodedId())) { + && new BytesArray("the_pit_id+++").equals(searchRequest.pointInTimeBuilder().getEncodedId())) { listener.onFailure(new SearchContextMissingException(new ShardSearchContextId("sc_missing", 42))); } else { ActionListener.respondAndRelease( @@ -532,7 +593,9 @@ protected void ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY, // copy the pit from the request - searchRequest.pointInTimeBuilder() != null ? searchRequest.pointInTimeBuilder().getEncodedId() + "+" : null + searchRequest.pointInTimeBuilder() != null + ? CompositeBytesReference.of(searchRequest.pointInTimeBuilder().getEncodedId(), new BytesArray("+")) + : null ) ); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java index 30792cce5dfb3..7824f9f46c2f6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestWatchServiceAction extends BaseRestHandler { @@ -55,7 +56,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { final WatcherServiceRequest request = new WatcherServiceRequest().stop(); - 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(WatcherServiceAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index a17cb7474a681..98f5daec730bb 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -206,8 +206,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testBWCIndexVersion() throws IOException { diff --git a/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java b/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java index bef0bbb1ee3c5..63193b86e3fd1 100644 --- a/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java +++ b/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java @@ -35,6 +35,7 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Restores the REST endpoint for freezing indices so that the JDBC tests can still freeze indices @@ -76,7 +77,7 @@ protected RestChannelConsumer prepareRequest(RestRequest 
request, NodeClient cli boolean freeze = request.path().endsWith("/_freeze"); FreezeRequest freezeRequest = new FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); freezeRequest.ackTimeout(request.paramAsTime("timeout", freezeRequest.ackTimeout())); - freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); + freezeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index 87db264356484..ca44d7fe6a85c 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ -45,6 +45,7 @@ def queryingCluster = testClusters.register('querying-cluster') { setting 'cluster.remote.connections_per_cluster', "1" user username: "test_user", password: "x-pack-test-password" + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' if (proxyMode) { setting 'cluster.remote.my_remote_cluster.mode', 'proxy' setting 'cluster.remote.my_remote_cluster.proxy_address', { diff --git a/x-pack/qa/runtime-fields/build.gradle b/x-pack/qa/runtime-fields/build.gradle index dd7d0abc24b19..0c7d4ee770ee6 100644 --- a/x-pack/qa/runtime-fields/build.gradle +++ b/x-pack/qa/runtime-fields/build.gradle @@ -73,6 +73,7 @@ subprojects { 'aggregations/range/Date range', //source only date field should also emit values for numbers, it expects strings only 'search/115_multiple_field_collapsing/two levels fields collapsing', // Field collapsing on a runtime field does not work 'search/111_field_collapsing_with_max_score/*', // Field 
collapsing on a runtime field does not work + 'search/112_field_collapsing_with_rescore/*', // Field collapsing on a runtime field does not work 'field_caps/30_index_filter/Field caps with index filter', // We don't support filtering field caps on runtime fields. What should we do? 'search/350_point_in_time/point-in-time with index filter', // We don't support filtering pit on runtime fields. 'aggregations/filters_bucket/cache busting', // runtime keyword does not support split_queries_on_whitespace