diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 167830d3ed8b3..19e99852869e6 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 0f2e70addd684..7dd8269f4ffe6 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -272,8 +272,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5 + - label: "{{matrix.image}} / 8.15.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.4 timeout_in_minutes: 300 matrix: setup: @@ -286,10 +286,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.15.5 + BWC_VERSION: 8.15.4 - - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 + - label: "{{matrix.image}} / 8.16.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.1 timeout_in_minutes: 300 matrix: setup: @@ -302,7 +302,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.0 + BWC_VERSION: 8.16.1 - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f68f64332426c..79371d6ddccf5 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -287,8 +287,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.5 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest + - label: 8.15.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.4#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -297,7 +297,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.15.5 + BWC_VERSION: 8.15.4 retry: automatic: - exit_status: "-1" @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.0#bwcTest + - label: 8.16.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.0 + BWC_VERSION: 8.16.1 retry: automatic: - exit_status: "-1" @@ -429,7 +429,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -471,7 +471,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git 
a/.ci/bwcVersions b/.ci/bwcVersions index b4a4460ff5a80..85522e47a523f 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -14,7 +14,7 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.5" - - "8.16.0" + - "8.15.4" + - "8.16.1" - "8.17.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 7dad55b653925..9ea3072021bb3 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - - "8.15.5" - - "8.16.0" + - "8.16.1" - "8.17.0" - "9.0.0" diff --git a/docs/changelog/104683.yaml b/docs/changelog/104683.yaml new file mode 100644 index 0000000000000..d4f40b59cfd91 --- /dev/null +++ b/docs/changelog/104683.yaml @@ -0,0 +1,5 @@ +pr: 104683 +summary: "Feature: re-structure document ID generation favoring _id inverted index compression" +area: Logs +type: enhancement +issues: [] diff --git a/docs/changelog/106520.yaml b/docs/changelog/106520.yaml deleted file mode 100644 index c3fe69a4c3dbd..0000000000000 --- a/docs/changelog/106520.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106520 -summary: Updated the transport CA name in Security Auto-Configuration. -area: Security -type: bug -issues: - - 106455 diff --git a/docs/changelog/107047.yaml b/docs/changelog/107047.yaml deleted file mode 100644 index 89caed6f55074..0000000000000 --- a/docs/changelog/107047.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107047 -summary: "Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields" -area: Search -type: bug -issues: - - 106846 diff --git a/docs/changelog/107936.yaml b/docs/changelog/107936.yaml deleted file mode 100644 index 89dd57f7a81a5..0000000000000 --- a/docs/changelog/107936.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107936 -summary: Two empty mappings now are created equally -area: Mapping -type: bug -issues: - - 107031 diff --git a/docs/changelog/109017.yaml b/docs/changelog/109017.yaml deleted file mode 100644 index 80bcdd6fc0e25..0000000000000 --- a/docs/changelog/109017.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109017 -summary: "ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security\ - \ solution" -area: ES|QL -type: "feature" -issues: [ ] diff --git a/docs/changelog/109193.yaml b/docs/changelog/109193.yaml deleted file mode 100644 index 5cc664eaee2cd..0000000000000 --- a/docs/changelog/109193.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109193 -summary: "[ES|QL] explicit cast a string literal to `date_period` and `time_duration`\ - \ in arithmetic operations" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml deleted file mode 100644 index 81b7541bde35b..0000000000000 --- a/docs/changelog/109414.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109414 -summary: Don't fail retention lease sync actions due to capacity constraints -area: CRUD -type: bug -issues: - - 105926 diff --git a/docs/changelog/109583.yaml b/docs/changelog/109583.yaml deleted file mode 100644 index 84757e307b4fb..0000000000000 --- a/docs/changelog/109583.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 109583 -summary: "ESQL: INLINESTATS" -area: ES|QL -type: feature -issues: - - 107589 -highlight: - title: "ESQL: INLINESTATS" - body: |- - This adds the `INLINESTATS` command to ESQL which performs a STATS and - then enriches the results into the output stream. 
So, this query: - - [source,esql] - ---- - FROM test - | INLINESTATS m=MAX(a * b) BY b - | WHERE m == a * b - | SORT a DESC, b DESC - | LIMIT 3 - ---- - - Produces output like: - - | a | b | m | - | --- | --- | ----- | - | 99 | 999 | 98901 | - | 99 | 998 | 98802 | - | 99 | 997 | 98703 | - notable: true diff --git a/docs/changelog/109667.yaml b/docs/changelog/109667.yaml deleted file mode 100644 index 782a1b1cf6c9b..0000000000000 --- a/docs/changelog/109667.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109667 -summary: Inference autoscaling -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/109684.yaml b/docs/changelog/109684.yaml deleted file mode 100644 index 156f568290cf5..0000000000000 --- a/docs/changelog/109684.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109684 -summary: Avoid `ModelAssignment` deadlock -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110021.yaml b/docs/changelog/110021.yaml deleted file mode 100644 index 51878b960dfd0..0000000000000 --- a/docs/changelog/110021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110021 -summary: "[ES|QL] validate `mv_sort` order" -area: ES|QL -type: bug -issues: - - 109910 diff --git a/docs/changelog/110116.yaml b/docs/changelog/110116.yaml deleted file mode 100644 index 9c309b8b80311..0000000000000 --- a/docs/changelog/110116.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110116 -summary: "[ESQL] Make query wrapped by `SingleValueQuery` cacheable" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml deleted file mode 100644 index 00ab20b230e2c..0000000000000 --- a/docs/changelog/110216.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110216 -summary: Register SLM run before snapshotting to save stats -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/110237.yaml b/docs/changelog/110237.yaml deleted file mode 100644 index 076855385376c..0000000000000 --- a/docs/changelog/110237.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 110237 -summary: Optimize the loop processing of URL decoding -area: Infra/REST API -type: enhancement -issues: - - 110235 - diff --git a/docs/changelog/110399.yaml b/docs/changelog/110399.yaml deleted file mode 100644 index 9e04e2656809e..0000000000000 --- a/docs/changelog/110399.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110399 -summary: "[Inference API] Prevent inference endpoints from being deleted if they are\ - \ referenced by semantic text" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/110427.yaml b/docs/changelog/110427.yaml deleted file mode 100644 index ba8a1246e90e4..0000000000000 --- a/docs/changelog/110427.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110427 -summary: "[Inference API] Remove unused Cohere rerank service settings fields in a\ - \ BWC way" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110520.yaml b/docs/changelog/110520.yaml deleted file mode 100644 index fba4b84e2279e..0000000000000 --- a/docs/changelog/110520.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110520 -summary: Add protection for OOM during aggregations partial reduction -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml deleted file mode 100644 index 6274c99b09998..0000000000000 --- a/docs/changelog/110524.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110524 -summary: Introduce mode `subobjects=auto` for objects -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110527.yaml 
b/docs/changelog/110527.yaml deleted file mode 100644 index 3ab19ecaaaa76..0000000000000 --- a/docs/changelog/110527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110527 -summary: "ESQL: Add boolean support to Max and Min aggs" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110554.yaml b/docs/changelog/110554.yaml deleted file mode 100644 index 8c0b896a4c979..0000000000000 --- a/docs/changelog/110554.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110554 -summary: Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers -area: "Mapping" -type: bug -issues: [] diff --git a/docs/changelog/110574.yaml b/docs/changelog/110574.yaml deleted file mode 100644 index 1840838500151..0000000000000 --- a/docs/changelog/110574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110574 -summary: "ES|QL: better validation for GROK patterns" -area: ES|QL -type: bug -issues: - - 110533 diff --git a/docs/changelog/110578.yaml b/docs/changelog/110578.yaml deleted file mode 100644 index 5d48171e4f328..0000000000000 --- a/docs/changelog/110578.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110578 -summary: Add `size_in_bytes` to enrich cache stats -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/110593.yaml b/docs/changelog/110593.yaml deleted file mode 100644 index 21a5d426ceb46..0000000000000 --- a/docs/changelog/110593.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110593 -summary: "[ES|QL] add tests for stats by constant" -area: ES|QL -type: bug -issues: - - 105383 diff --git a/docs/changelog/110603.yaml b/docs/changelog/110603.yaml deleted file mode 100644 index 4ba19985853df..0000000000000 --- a/docs/changelog/110603.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110603 -summary: Stop iterating over all fields to extract @timestamp value -area: TSDB -type: enhancement -issues: - - 92297 diff --git a/docs/changelog/110606.yaml b/docs/changelog/110606.yaml deleted file mode 100644 index d4ab5234289c4..0000000000000 --- a/docs/changelog/110606.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110606 -summary: Adding mapping validation to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/110630.yaml b/docs/changelog/110630.yaml deleted file mode 100644 index 9bf78e1209753..0000000000000 --- a/docs/changelog/110630.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110630 -summary: Telemetry for inference adaptive allocations -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml deleted file mode 100644 index d4d1dc68cdbcc..0000000000000 --- a/docs/changelog/110633.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110633 -summary: Add manage roles privilege -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/110669.yaml b/docs/changelog/110669.yaml deleted file mode 100644 index 301e756ca373c..0000000000000 --- a/docs/changelog/110669.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110669 -summary: "[ES|QL] Use `RangeQuery` and String in `BinaryComparison` on datetime fields" -area: ES|QL -type: bug -issues: - - 107900 diff --git a/docs/changelog/110676.yaml b/docs/changelog/110676.yaml deleted file mode 100644 index efe7e0e55f18f..0000000000000 --- a/docs/changelog/110676.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110676 -summary: Allow querying `index_mode` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110677.yaml b/docs/changelog/110677.yaml deleted file mode 100644 index 72fe5129f3b9d..0000000000000 --- a/docs/changelog/110677.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-pr: 110677 -summary: Add validation for synthetic source mode in logs mode indices -area: Logs -type: enhancement -issues: [] diff --git a/docs/changelog/110718.yaml b/docs/changelog/110718.yaml deleted file mode 100644 index 526083a8add0c..0000000000000 --- a/docs/changelog/110718.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110718 -summary: "ESQL: Add boolean support to TOP aggregation" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110734.yaml b/docs/changelog/110734.yaml deleted file mode 100644 index d6dce144b89cd..0000000000000 --- a/docs/changelog/110734.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110734 -summary: Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/110796.yaml b/docs/changelog/110796.yaml deleted file mode 100644 index a54a9a08bbd27..0000000000000 --- a/docs/changelog/110796.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110796 -summary: Remove needless forking to GENERIC in `TransportMultiSearchAction` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110816.yaml b/docs/changelog/110816.yaml deleted file mode 100644 index bf707376ec9ea..0000000000000 --- a/docs/changelog/110816.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110816 -summary: GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml -area: Infra/Settings -type: bug -issues: - - 110815 diff --git a/docs/changelog/110829.yaml b/docs/changelog/110829.yaml deleted file mode 100644 index 365a14436ec89..0000000000000 --- a/docs/changelog/110829.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 110829 -summary: deprecate `edge_ngram` side parameter -area: Analysis -type: deprecation -issues: [] -deprecation: - title: deprecate `edge_ngram` side parameter - area: Analysis - details: edge_ngram will no longer accept the side parameter. - impact: Users will need to update any usage of edge_ngram token filter that utilizes `side`. If the `back` value was used, they can achieve the same behavior by using the `reverse` token filter. 
diff --git a/docs/changelog/110833.yaml b/docs/changelog/110833.yaml deleted file mode 100644 index 008fc489ed731..0000000000000 --- a/docs/changelog/110833.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110833 -summary: Make empty string searches be consistent with case (in)sensitivity -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110846.yaml b/docs/changelog/110846.yaml deleted file mode 100644 index 56cc65e83648c..0000000000000 --- a/docs/changelog/110846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110846 -summary: Fix MLTQuery handling of custom term frequencies -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml deleted file mode 100644 index 214adc97ac7cb..0000000000000 --- a/docs/changelog/110847.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110847 -summary: SLM Interval based scheduling -area: ILM+SLM -type: feature -issues: [] diff --git a/docs/changelog/110860.yaml b/docs/changelog/110860.yaml deleted file mode 100644 index 5649ca4c88362..0000000000000 --- a/docs/changelog/110860.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110860 -summary: Speedup `CanMatchPreFilterSearchPhase` constructor -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110879.yaml b/docs/changelog/110879.yaml deleted file mode 100644 index d114c6c2aa472..0000000000000 --- a/docs/changelog/110879.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110879 -summary: Add EXP ES|QL function -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110901.yaml b/docs/changelog/110901.yaml deleted file mode 100644 index 599cb7ce9ec98..0000000000000 --- a/docs/changelog/110901.yaml +++ /dev/null @@ -1,15 +0,0 @@ -pr: 110901 -summary: Set lenient to true by default when using updateable synonyms -area: Analysis -type: breaking -issues: [] -breaking: - title: Set lenient to true by default when using updateable synonyms - area: Analysis - details: | - When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient` - value will now be `true`. - impact: | - `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by - default. This prevents shard initialization errors on invalid synonyms. 
- notable: true diff --git a/docs/changelog/110921.yaml b/docs/changelog/110921.yaml deleted file mode 100644 index 28cd569404945..0000000000000 --- a/docs/changelog/110921.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110921 -summary: "ESQL: Support IP fields in MAX and MIN aggregations" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110928.yaml b/docs/changelog/110928.yaml deleted file mode 100644 index dcb2df6e6cca9..0000000000000 --- a/docs/changelog/110928.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110928 -summary: Dense vector field types updatable for int4 -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/110951.yaml b/docs/changelog/110951.yaml deleted file mode 100644 index ec8bc9cae6347..0000000000000 --- a/docs/changelog/110951.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110951 -summary: Allow task canceling of validate API calls -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/110971.yaml b/docs/changelog/110971.yaml deleted file mode 100644 index 3579f77dc0d1d..0000000000000 --- a/docs/changelog/110971.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110971 -summary: "Search in ES|QL: Add MATCH operator" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110974.yaml b/docs/changelog/110974.yaml deleted file mode 100644 index c9e8c9b78675e..0000000000000 --- a/docs/changelog/110974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110974 -summary: Add custom rule parameters to force time shift -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/110986.yaml b/docs/changelog/110986.yaml deleted file mode 100644 index 4e320b19c9578..0000000000000 --- a/docs/changelog/110986.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110986 -summary: Fix unnecessary mustache template evaluation -area: Ingest Node -type: enhancement -issues: - - 110191 diff --git a/docs/changelog/110993.yaml b/docs/changelog/110993.yaml deleted file mode 100644 index 9eb653a09e3a4..0000000000000 --- a/docs/changelog/110993.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110993 -summary: Add link to Max Shards Per Node exception message -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/111015.yaml b/docs/changelog/111015.yaml deleted file mode 100644 index 3cc363c8bbf6b..0000000000000 --- a/docs/changelog/111015.yaml +++ /dev/null @@ -1,15 +0,0 @@ -pr: 111015 -summary: Always allow rebalancing by default -area: Allocation -type: enhancement -issues: [] -highlight: - title: Always allow rebalancing by default - body: |- - In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to - `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was - appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has - better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some - shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From - version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. 
- notable: true diff --git a/docs/changelog/111064.yaml b/docs/changelog/111064.yaml deleted file mode 100644 index 848da842b090e..0000000000000 --- a/docs/changelog/111064.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111064 -summary: "ESQL: Fix Double operations returning infinite" -area: ES|QL -type: bug -issues: - - 111026 diff --git a/docs/changelog/111071.yaml b/docs/changelog/111071.yaml deleted file mode 100644 index 5e8ab53db3d03..0000000000000 --- a/docs/changelog/111071.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111071 -summary: Use native scalar scorer for int8_flat index -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/111079.yaml b/docs/changelog/111079.yaml deleted file mode 100644 index aac22005f912d..0000000000000 --- a/docs/changelog/111079.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111079 -summary: PUT slm policy should only increase version if actually changed -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml deleted file mode 100644 index 8444681a14a48..0000000000000 --- a/docs/changelog/111091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111091 -summary: "X-pack/plugin/otel: introduce x-pack-otel plugin" -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/111105.yaml b/docs/changelog/111105.yaml deleted file mode 100644 index ed32bd1ef7fc3..0000000000000 --- a/docs/changelog/111105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111105 -summary: "ESQL: TOP aggregation IP support" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111118.yaml b/docs/changelog/111118.yaml deleted file mode 100644 index c9fe6cb443688..0000000000000 --- a/docs/changelog/111118.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111118 -summary: "[ES|QL] Simplify patterns for subfields" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/111123.yaml b/docs/changelog/111123.yaml deleted file mode 100644 index 605b8607f4082..0000000000000 --- a/docs/changelog/111123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111123 -summary: Add Lucene segment-level fields stats -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111154.yaml b/docs/changelog/111154.yaml deleted file mode 100644 index 3297f5005a811..0000000000000 --- a/docs/changelog/111154.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111154 -summary: EIS integration -area: Inference -type: feature -issues: [] diff --git a/docs/changelog/111161.yaml b/docs/changelog/111161.yaml deleted file mode 100644 index c081d555ff1ee..0000000000000 --- a/docs/changelog/111161.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111161 -summary: Add support for templates when validating mappings in the simulate ingest - API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/111181.yaml b/docs/changelog/111181.yaml deleted file mode 100644 index 7f9f5937b7652..0000000000000 --- a/docs/changelog/111181.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111181 -summary: "[Inference API] Add Alibaba Cloud AI Search Model support to Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml deleted file mode 100644 index 9e56facb60d3a..0000000000000 --- a/docs/changelog/111193.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111193 -summary: Fix cases of collections with one point -area: Geo -type: bug -issues: - - 110982 diff --git a/docs/changelog/111212.yaml b/docs/changelog/111212.yaml deleted file mode 100644 index 67d1513b3ff6f..0000000000000 --- 
a/docs/changelog/111212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111212 -summary: Fix score count validation in reranker response -area: Ranking -type: bug -issues: - - 111202 diff --git a/docs/changelog/111215.yaml b/docs/changelog/111215.yaml deleted file mode 100644 index dc044c2283fc4..0000000000000 --- a/docs/changelog/111215.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111215 -summary: Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY` - isn't changed as side-effect -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/111225.yaml b/docs/changelog/111225.yaml deleted file mode 100644 index bcd344847cfd2..0000000000000 --- a/docs/changelog/111225.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111225 -summary: Upgrade Azure SDK -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/111226.yaml b/docs/changelog/111226.yaml deleted file mode 100644 index 1021a26fa789f..0000000000000 --- a/docs/changelog/111226.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111226 -summary: "ES|QL: add Telemetry API and track top functions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111238.yaml b/docs/changelog/111238.yaml deleted file mode 100644 index b918b754ff595..0000000000000 --- a/docs/changelog/111238.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111238 -summary: Fix validation of TEXT fields with case insensitive comparison -area: EQL -type: bug -issues: - - 111235 diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml deleted file mode 100644 index 384373d52cb20..0000000000000 --- a/docs/changelog/111245.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111245 -summary: Truncating watcher history if it is too large -area: Watcher -type: bug -issues: - - 94745 diff --git a/docs/changelog/111274.yaml b/docs/changelog/111274.yaml deleted file mode 100644 index e26bcc03ce118..0000000000000 --- a/docs/changelog/111274.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111274 -summary: Include account name in Azure settings exceptions -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/111284.yaml b/docs/changelog/111284.yaml deleted file mode 100644 index f87649a134af6..0000000000000 --- a/docs/changelog/111284.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111284 -summary: Update `semantic_text` field to support indexing numeric and boolean data - types -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111311.yaml b/docs/changelog/111311.yaml deleted file mode 100644 index 5786e11e885e2..0000000000000 --- a/docs/changelog/111311.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111311 -summary: Adding support for data streams with a match-all template -area: Data streams -type: bug -issues: - - 111204 diff --git a/docs/changelog/111315.yaml b/docs/changelog/111315.yaml deleted file mode 100644 index 0e2e56898b51c..0000000000000 --- a/docs/changelog/111315.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111315 -summary: Add link to flood-stage watermark exception message -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/111316.yaml b/docs/changelog/111316.yaml deleted file mode 100644 index 0d915cd1ec3ea..0000000000000 --- a/docs/changelog/111316.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111316 -summary: "[Service Account] Add `AutoOps` account" -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/111336.yaml b/docs/changelog/111336.yaml deleted file mode 100644 index d5bf602cb7a88..0000000000000 --- a/docs/changelog/111336.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
111336 -summary: Use the same chunking configurations for models in the Elasticsearch service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/111344.yaml b/docs/changelog/111344.yaml deleted file mode 100644 index 3d5988054749d..0000000000000 --- a/docs/changelog/111344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111344 -summary: Add support for Azure Managed Identity -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/111367.yaml b/docs/changelog/111367.yaml deleted file mode 100644 index 89e6c1d3b4da4..0000000000000 --- a/docs/changelog/111367.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111367 -summary: "ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml deleted file mode 100644 index 297fa77cd2664..0000000000000 --- a/docs/changelog/111412.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111412 -summary: Make enrich cache based on memory usage -area: Ingest Node -type: enhancement -issues: - - 106081 diff --git a/docs/changelog/111413.yaml b/docs/changelog/111413.yaml deleted file mode 100644 index 0eae45b17d0c4..0000000000000 --- a/docs/changelog/111413.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111413 -summary: "ESQL: Fix synthetic attribute pruning" -area: ES|QL -type: bug -issues: - - 105821 diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml deleted file mode 100644 index 4e2640ac5762a..0000000000000 --- a/docs/changelog/111420.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111420 -summary: "[Query rules] Add `exclude` query rule type" -area: Relevance -type: feature -issues: [] diff --git a/docs/changelog/111437.yaml b/docs/changelog/111437.yaml deleted file mode 100644 index a50312ffdd1aa..0000000000000 --- a/docs/changelog/111437.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111437 -summary: "[ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111445.yaml b/docs/changelog/111445.yaml deleted file mode 100644 index 9ba8e4371bd0c..0000000000000 --- a/docs/changelog/111445.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111445 -summary: Support booleans in routing path -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/111457.yaml b/docs/changelog/111457.yaml deleted file mode 100644 index f4ad4ee53eb0a..0000000000000 --- a/docs/changelog/111457.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111457 -summary: Add support for boolean dimensions -area: TSDB -type: enhancement -issues: - - 111338 diff --git a/docs/changelog/111465.yaml b/docs/changelog/111465.yaml deleted file mode 100644 index 2a8df287427a9..0000000000000 --- a/docs/changelog/111465.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111465 -summary: Add range and regexp Intervals -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111490.yaml b/docs/changelog/111490.yaml deleted file mode 100644 index b67c16189cc62..0000000000000 --- a/docs/changelog/111490.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111490 -summary: Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/111501.yaml b/docs/changelog/111501.yaml deleted file mode 100644 index a424142376e52..0000000000000 --- a/docs/changelog/111501.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111501 -summary: "[ES|QL] Combine 
Disjunctive CIDRMatch" -area: ES|QL -type: enhancement -issues: - - 105143 diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml deleted file mode 100644 index 96e8bd843f750..0000000000000 --- a/docs/changelog/111516.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111516 -summary: Adding support for `allow_partial_search_results` in PIT -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml deleted file mode 100644 index 202d16c5a426d..0000000000000 --- a/docs/changelog/111523.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111523 -summary: Search coordinator uses `event.ingested` in cluster state to do rewrites -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml deleted file mode 100644 index d4c46f485e664..0000000000000 --- a/docs/changelog/111544.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111544 -summary: "ESQL: Strings support for MAX and MIN aggregations" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111552.yaml b/docs/changelog/111552.yaml deleted file mode 100644 index d9991788d4fa9..0000000000000 --- a/docs/changelog/111552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111552 -summary: Siem ea 9521 improve test -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111576.yaml b/docs/changelog/111576.yaml deleted file mode 100644 index 6d3c331f4bbd5..0000000000000 --- a/docs/changelog/111576.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111576 -summary: Execute shard snapshot tasks in shard-id order -area: Snapshot/Restore -type: enhancement -issues: - - 108739 diff --git a/docs/changelog/111600.yaml b/docs/changelog/111600.yaml deleted file mode 100644 index 0c1e01e1c2e23..0000000000000 --- a/docs/changelog/111600.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111600 -summary: Make ecs@mappings work with OTel attributes -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/111624.yaml b/docs/changelog/111624.yaml deleted file mode 100644 index 7b04b244ef7a7..0000000000000 --- a/docs/changelog/111624.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111624 -summary: Extend logging for dropped warning headers -area: Infra/Core -type: enhancement -issues: - - 90527 diff --git a/docs/changelog/111644.yaml b/docs/changelog/111644.yaml deleted file mode 100644 index 3705d697c95e3..0000000000000 --- a/docs/changelog/111644.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111644 -summary: Force using the last centroid during merging -area: Aggregations -type: bug -issues: - - 111065 diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml deleted file mode 100644 index 077714d15a712..0000000000000 --- a/docs/changelog/111655.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111655 -summary: Migrate Inference to `ChunkedToXContent` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/111683.yaml b/docs/changelog/111683.yaml deleted file mode 100644 index cbb2e5ad71ddc..0000000000000 --- a/docs/changelog/111683.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111683 -summary: Only emit product origin in deprecation log if present -area: Infra/Logging -type: bug -issues: - - 81757 diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml deleted file mode 100644 index ccb3d4d4f87c5..0000000000000 --- a/docs/changelog/111689.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111689 -summary: Add nanos support to `ZonedDateTime` serialization -area: Infra/Core -type: enhancement -issues: - - 68292 diff --git 
a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml deleted file mode 100644 index 36e715744ad88..0000000000000 --- a/docs/changelog/111690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111690 -summary: "ESQL: Support INLINESTATS grouped on expressions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml deleted file mode 100644 index 48b7ee200e45e..0000000000000 --- a/docs/changelog/111740.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111740 -summary: Fix Start Trial API output acknowledgement header for features -area: License -type: bug -issues: - - 111739 diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml deleted file mode 100644 index 77e0c65005dd6..0000000000000 --- a/docs/changelog/111749.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111749 -summary: "ESQL: Added `mv_percentile` function" -area: ES|QL -type: feature -issues: - - 111591 diff --git a/docs/changelog/111770.yaml b/docs/changelog/111770.yaml deleted file mode 100644 index 8d6bde6b25ef9..0000000000000 --- a/docs/changelog/111770.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111770 -summary: Integrate IBM watsonx to Inference API for text embeddings -area: Experiences -type: enhancement -issues: [] diff --git a/docs/changelog/111779.yaml b/docs/changelog/111779.yaml deleted file mode 100644 index 52c635490e1e4..0000000000000 --- a/docs/changelog/111779.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111779 -summary: "ESQL: Fix serialization during `can_match`" -area: ES|QL -type: bug -issues: - - 111701 - - 111726 diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml deleted file mode 100644 index 00b793a19d9c3..0000000000000 --- a/docs/changelog/111797.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111797 -summary: "ESQL: fix for missing indices error message" -area: ES|QL -type: bug -issues: - - 111712 diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml deleted file mode 100644 index 5a2f220e3a697..0000000000000 --- a/docs/changelog/111809.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111809 -summary: Add Field caps support for Semantic Text -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111818.yaml b/docs/changelog/111818.yaml deleted file mode 100644 index c3a632861aae6..0000000000000 --- a/docs/changelog/111818.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111818 -summary: Add tier preference to security index settings allowlist -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml deleted file mode 100644 index c40a9e2aef621..0000000000000 --- a/docs/changelog/111840.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111840 -summary: "ESQL: Add async ID and `is_running` headers to ESQL async query" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml deleted file mode 100644 index 3f15e9c20135a..0000000000000 --- a/docs/changelog/111855.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111855 -summary: "ESQL: Profile more timing information" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml deleted file mode 100644 index 26ec90aa6cd4c..0000000000000 --- a/docs/changelog/111874.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 111874 -summary: "ESQL: BUCKET: allow numerical spans as whole numbers" -area: ES|QL -type: enhancement -issues: - - 104646 - - 109340 - - 105375 diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml deleted file mode 100644 index 
b8c2111e1d286..0000000000000 --- a/docs/changelog/111879.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111879 -summary: "ESQL: Have BUCKET generate friendlier intervals" -area: ES|QL -type: enhancement -issues: - - 110916 diff --git a/docs/changelog/111915.yaml b/docs/changelog/111915.yaml deleted file mode 100644 index f64c45b82d10c..0000000000000 --- a/docs/changelog/111915.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111915 -summary: Fix DLS & FLS sometimes being enforced when it is disabled -area: Authorization -type: bug -issues: - - 94709 diff --git a/docs/changelog/111917.yaml b/docs/changelog/111917.yaml deleted file mode 100644 index 0dc760d76a698..0000000000000 --- a/docs/changelog/111917.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111917 -summary: "[ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and\ - \ In at Analyzer" -area: ES|QL -type: enhancement -issues: - - 111486 diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml deleted file mode 100644 index 7d856e29d54c5..0000000000000 --- a/docs/changelog/111937.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111937 -summary: Handle `BigInteger` in xcontent copy -area: Infra/Core -type: bug -issues: - - 111812 diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml deleted file mode 100644 index a3a592abaf1ca..0000000000000 --- a/docs/changelog/111948.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111948 -summary: Upgrade xcontent to Jackson 2.17.0 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml deleted file mode 100644 index 3f23c17d8e652..0000000000000 --- a/docs/changelog/111950.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111950 -summary: "[ES|QL] Name parameter with leading underscore" -area: ES|QL -type: enhancement -issues: - - 111821 diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml deleted file mode 100644 index ebc518203b7cc..0000000000000 --- a/docs/changelog/111955.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111955 -summary: Clean up dangling S3 multipart uploads -area: Snapshot/Restore -type: enhancement -issues: - - 101169 - - 44971 diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml deleted file mode 100644 index 9d758c76369e9..0000000000000 --- a/docs/changelog/111968.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111968 -summary: "ESQL: don't lose the original casting error message" -area: ES|QL -type: bug -issues: - - 111967 diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml deleted file mode 100644 index 2d276850c4988..0000000000000 --- a/docs/changelog/111969.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111969 -summary: "[Profiling] add `container.id` field to event index template" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml deleted file mode 100644 index a5bfcd5b0882e..0000000000000 --- a/docs/changelog/111972.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 111972 -summary: Introduce global retention in data stream lifecycle. -area: Data streams -type: feature -issues: [] -highlight: - title: Add global retention in data stream lifecycle - body: |- - Data stream lifecycle now supports configuring retention on a cluster level, - namely global retention. Global retention \nallows us to configure two different - retentions: - - - `data_streams.lifecycle.retention.default` is applied to all data streams managed - by the data stream lifecycle that do not have retention defined on the data stream level. 
- - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the - data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. - notable: true diff --git a/docs/changelog/111981.yaml b/docs/changelog/111981.yaml deleted file mode 100644 index 13b8fe4b7e38d..0000000000000 --- a/docs/changelog/111981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111981 -summary: Allow fields with dots in sparse vector field mapper -area: Mapping -type: enhancement -issues: - - 109118 diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml deleted file mode 100644 index 7afb207864ed7..0000000000000 --- a/docs/changelog/112019.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112019 -summary: Display effective retention in the relevant data stream APIs -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112024.yaml b/docs/changelog/112024.yaml deleted file mode 100644 index e426693fba964..0000000000000 --- a/docs/changelog/112024.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112024 -summary: (API) Cluster Health report `unassigned_primary_shards` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112026.yaml b/docs/changelog/112026.yaml deleted file mode 100644 index fedf001923ab4..0000000000000 --- a/docs/changelog/112026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112026 -summary: Create `StreamingHttpResultPublisher` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112055.yaml b/docs/changelog/112055.yaml deleted file mode 100644 index cdf15b3b37468..0000000000000 --- a/docs/changelog/112055.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112055 -summary: "ESQL: `mv_median_absolute_deviation` function" -area: ES|QL -type: feature -issues: - - 111590 diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml deleted file mode 100644 index e974b3413582e..0000000000000 --- a/docs/changelog/112058.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112058 -summary: Fix RRF validation for `rank_constant` < 1 -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/112063.yaml b/docs/changelog/112063.yaml deleted file mode 100644 index 190993967a074..0000000000000 --- a/docs/changelog/112063.yaml +++ /dev/null @@ -1,32 +0,0 @@ -pr: 112063 -summary: Spatial search functions support multi-valued fields in compute engine -area: ES|QL -type: bug -issues: - - 112102 - - 112505 - - 110830 -highlight: - title: "ESQL: Multi-value fields supported in Geospatial predicates" - body: |- - Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` - of the values in the field should pass the predicate. - For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? - This ambiguity does not exist with the spatial predicates - `ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` - is implied by the predicate itself. - Consider a predicate checking a field named `location` against a test geometry named `shape`: - - * `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape - * `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape - - This works even if the shape argument is itself a complex or compound geometry. 
- - Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved - with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained - geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value - field is first combined into a single geometry before performing the predicate check. - - * `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape - * `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape - notable: false diff --git a/docs/changelog/112066.yaml b/docs/changelog/112066.yaml deleted file mode 100644 index 5dd846766bc8e..0000000000000 --- a/docs/changelog/112066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112066 -summary: Do not treat replica as unassigned if primary recently created and unassigned - time is below a threshold -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112081.yaml b/docs/changelog/112081.yaml deleted file mode 100644 index a4009e01fca71..0000000000000 --- a/docs/changelog/112081.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112081 -summary: "[ES|QL] Validate index name in parser" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112100.yaml b/docs/changelog/112100.yaml deleted file mode 100644 index 9135edecb4d77..0000000000000 --- a/docs/changelog/112100.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112100 -summary: Exclude internal data streams from global retention -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml deleted file mode 100644 index 0c0d7ac44cd17..0000000000000 --- a/docs/changelog/112123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112123 -summary: SLM interval schedule followup - add back `getFieldName` style getters -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml deleted file mode 100644 index f6a7aeb893a5e..0000000000000 --- a/docs/changelog/112126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112126 -summary: Add support for spatial relationships in point field mapper -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml deleted file mode 100644 index 11109402b7373..0000000000000 --- a/docs/changelog/112133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112133 -summary: Add telemetry for repository usage -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112151.yaml b/docs/changelog/112151.yaml deleted file mode 100644 index f5cbfd8da07c2..0000000000000 --- a/docs/changelog/112151.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112151 -summary: Store original source for keywords using a normalizer -area: Logs -type: enhancement -issues: [] diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml deleted file mode 100644 index eb22f215f9828..0000000000000 --- a/docs/changelog/112199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112199 -summary: Support docvalues only query in shape field -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml deleted file mode 100644 index 0c2c3d71e3ddf..0000000000000 --- a/docs/changelog/112200.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112200 -summary: "ES|QL: better validation of GROK patterns" -area: ES|QL -type: bug -issues: - - 112111 diff --git a/docs/changelog/112210.yaml b/docs/changelog/112210.yaml deleted file mode 100644 index 
6483b8b01315c..0000000000000 --- a/docs/changelog/112210.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112210 -summary: Expose global retention settings via data stream lifecycle API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml deleted file mode 100644 index 430f95a72bb3f..0000000000000 --- a/docs/changelog/112214.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112214 -summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read' -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml deleted file mode 100644 index c426dd7ade4ed..0000000000000 --- a/docs/changelog/112218.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 112218 -summary: "ESQL: Fix a bug in `MV_PERCENTILE`" -area: ES|QL -type: bug -issues: - - 112193 - - 112180 - - 112187 - - 112188 diff --git a/docs/changelog/112262.yaml b/docs/changelog/112262.yaml deleted file mode 100644 index fe23c14c79c9e..0000000000000 --- a/docs/changelog/112262.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112262 -summary: Check for disabling own user in Put User API -area: Authentication -type: bug -issues: - - 90205 diff --git a/docs/changelog/112263.yaml b/docs/changelog/112263.yaml deleted file mode 100644 index 2d1321f327673..0000000000000 --- a/docs/changelog/112263.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112263 -summary: Fix `TokenService` always appearing used in Feature Usage -area: License -type: bug -issues: - - 61956 diff --git a/docs/changelog/112270.yaml b/docs/changelog/112270.yaml deleted file mode 100644 index 1e6b9c7fc9290..0000000000000 --- a/docs/changelog/112270.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112270 -summary: Support sparse embedding models in the elasticsearch inference service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112273.yaml b/docs/changelog/112273.yaml deleted file mode 100644 index 3182a1884a145..0000000000000 --- a/docs/changelog/112273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111181 -summary: "[Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/112277.yaml b/docs/changelog/112277.yaml deleted file mode 100644 index eac474555999a..0000000000000 --- a/docs/changelog/112277.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112277 -summary: Upgrade `repository-azure` dependencies -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/112282.yaml b/docs/changelog/112282.yaml deleted file mode 100644 index beea119b06aef..0000000000000 --- a/docs/changelog/112282.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112282 -summary: Adds example plugin for custom ingest processor -area: Ingest Node -type: enhancement -issues: - - 111539 diff --git a/docs/changelog/112294.yaml b/docs/changelog/112294.yaml deleted file mode 100644 index 71ce9eeef584c..0000000000000 --- a/docs/changelog/112294.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112294 -summary: "Use fallback synthetic source for `copy_to` and doc_values: false cases" -area: Mapping -type: enhancement -issues: - - 110753 - - 110038 - - 109546 diff --git a/docs/changelog/112295.yaml b/docs/changelog/112295.yaml deleted file mode 100644 index ecbd365d03918..0000000000000 --- a/docs/changelog/112295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112295 -summary: "ESQL: Speed up CASE for some parameters" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112303.yaml 
b/docs/changelog/112303.yaml deleted file mode 100644 index a363e621e4c48..0000000000000 --- a/docs/changelog/112303.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112303 -summary: Add 'verbose' flag retrieving `maximum_timestamp` for get data stream API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112320.yaml b/docs/changelog/112320.yaml deleted file mode 100644 index d35a08dfa4e91..0000000000000 --- a/docs/changelog/112320.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112320 -summary: Upgrade xcontent to Jackson 2.17.2 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/112330.yaml b/docs/changelog/112330.yaml deleted file mode 100644 index 498698f5175ba..0000000000000 --- a/docs/changelog/112330.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112330 -summary: Add links to network disconnect troubleshooting -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/112337.yaml b/docs/changelog/112337.yaml deleted file mode 100644 index f7d667e23cfe9..0000000000000 --- a/docs/changelog/112337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112337 -summary: Add workaround for missing shard gen blob -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112341.yaml b/docs/changelog/112341.yaml deleted file mode 100644 index 8f44b53ad9998..0000000000000 --- a/docs/changelog/112341.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112341 -summary: Fix DLS using runtime fields and synthetic source -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/112345.yaml b/docs/changelog/112345.yaml deleted file mode 100644 index b922fe3754cbb..0000000000000 --- a/docs/changelog/112345.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112345 -summary: Allow dimension fields to have multiple values in standard and logsdb index - mode -area: Mapping -type: enhancement -issues: - - 112232 - - 112239 diff --git a/docs/changelog/112348.yaml b/docs/changelog/112348.yaml deleted file mode 100644 index 84110a7cd4f1b..0000000000000 --- a/docs/changelog/112348.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112348 -summary: Introduce repository integrity verification API -area: Snapshot/Restore -type: enhancement -issues: - - 52622 diff --git a/docs/changelog/112350.yaml b/docs/changelog/112350.yaml deleted file mode 100644 index 994cd3a65c633..0000000000000 --- a/docs/changelog/112350.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112350 -summary: "[ESQL] Add `SPACE` function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112369.yaml b/docs/changelog/112369.yaml deleted file mode 100644 index fb1c4775f7a12..0000000000000 --- a/docs/changelog/112369.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112369 -summary: Register Task while Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112397.yaml b/docs/changelog/112397.yaml deleted file mode 100644 index e67478ec69b1c..0000000000000 --- a/docs/changelog/112397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112397 -summary: Control storing array source with index setting -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112401.yaml b/docs/changelog/112401.yaml deleted file mode 100644 index 65e9e76ac25f6..0000000000000 --- a/docs/changelog/112401.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112401 -summary: "ESQL: Fix CASE when conditions are multivalued" -area: ES|QL -type: bug -issues: - - 112359 diff --git a/docs/changelog/112405.yaml b/docs/changelog/112405.yaml deleted file mode 100644 index 4e9f095fb80a8..0000000000000 --- 
a/docs/changelog/112405.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112405 -summary: Improve date expression/remote handling in index names -area: Search -type: bug -issues: - - 112243 diff --git a/docs/changelog/112409.yaml b/docs/changelog/112409.yaml deleted file mode 100644 index bad94b9f5f2be..0000000000000 --- a/docs/changelog/112409.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112409 -summary: Include reason when no nodes are found -area: "Transform" -type: bug -issues: - - 112404 diff --git a/docs/changelog/112412.yaml b/docs/changelog/112412.yaml deleted file mode 100644 index fda53ebd1ade0..0000000000000 --- a/docs/changelog/112412.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112412 -summary: Expose `HexFormat` in Painless -area: Infra/Scripting -type: enhancement -issues: [] diff --git a/docs/changelog/112431.yaml b/docs/changelog/112431.yaml deleted file mode 100644 index b8c1197bdc7ef..0000000000000 --- a/docs/changelog/112431.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112431 -summary: "Async search: Add ID and \"is running\" http headers" -area: Search -type: feature -issues: - - 109576 diff --git a/docs/changelog/112440.yaml b/docs/changelog/112440.yaml deleted file mode 100644 index f208474fa2686..0000000000000 --- a/docs/changelog/112440.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112440 -summary: "logs-apm.error-*: define log.level field as keyword" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112451.yaml b/docs/changelog/112451.yaml deleted file mode 100644 index aa852cf5e2a1a..0000000000000 --- a/docs/changelog/112451.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 112451 -summary: Update data stream lifecycle telemetry to track global retention -area: Data streams -type: breaking -issues: [] -breaking: - title: Update data stream lifecycle telemetry to track global retention - area: REST API - details: |- - In this release we introduced global retention settings that fulfil the following criteria: - - - a data stream managed by the data stream lifecycle, - - a data stream that is not an internal data stream. - - As a result, we defined different types of retention: - - - **data retention**: the retention configured on data stream level by the data stream user or owner - - **default global retention:** the retention configured by an admin on a cluster level and applied to any - data stream that doesn't have data retention and fulfils the criteria. - - **max global retention:** the retention configured by an admin to guard against having long retention periods. - Any data stream that fulfills the criteria will adhere to the data retention unless it exceeds the max retention, - in which case the max global retention applies. - - **effective retention:** the retention that applies on the data stream that fulfill the criteria at a given moment - in time. It takes into consideration all the retention above and resolves it to the retention that will take effect. - - Considering the above changes, having a field named `retention` in the usage API was confusing. For this reason, we - renamed it to `data_retention` and added telemetry about the other configurations too. 
- impact: Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` - notable: false diff --git a/docs/changelog/112481.yaml b/docs/changelog/112481.yaml deleted file mode 100644 index 3e539ce8e4b75..0000000000000 --- a/docs/changelog/112481.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112481 -summary: Validate streaming HTTP Response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112489.yaml b/docs/changelog/112489.yaml deleted file mode 100644 index ebc84927b0e76..0000000000000 --- a/docs/changelog/112489.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112489 -summary: "ES|QL: better validation for RLIKE patterns" -area: ES|QL -type: bug -issues: - - 112485 diff --git a/docs/changelog/112508.yaml b/docs/changelog/112508.yaml deleted file mode 100644 index 3945ebd226ac4..0000000000000 --- a/docs/changelog/112508.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112508 -summary: "[ML] Create Inference API will no longer return model_id and now only return inference_id" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/112512.yaml b/docs/changelog/112512.yaml deleted file mode 100644 index a9812784ccfca..0000000000000 --- a/docs/changelog/112512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112512 -summary: Add Completion Inference API for Alibaba Cloud AI Search Model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112519.yaml b/docs/changelog/112519.yaml deleted file mode 100644 index aa8a942ef0f58..0000000000000 --- a/docs/changelog/112519.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112519 -summary: Lower the memory footprint when creating `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112547.yaml b/docs/changelog/112547.yaml deleted file mode 100644 index 7f42f2a82976e..0000000000000 --- a/docs/changelog/112547.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112547 -summary: Remove reduce and `reduceContext` from `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112565.yaml b/docs/changelog/112565.yaml deleted file mode 100644 index be9ec41419a09..0000000000000 --- a/docs/changelog/112565.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112565 -summary: Server-Sent Events for Inference response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112571.yaml b/docs/changelog/112571.yaml deleted file mode 100644 index f1be2e5c291de..0000000000000 --- a/docs/changelog/112571.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 112571 -summary: Deprecate dot-prefixed indices and composable template index patterns -area: CRUD -type: deprecation -issues: [] -deprecation: - title: Deprecate dot-prefixed indices and composable template index patterns - area: CRUD - details: "Indices beginning with a dot '.' are reserved for system and internal\ - \ indices, and should not be used by and end-user. Additionally, composable index\ - \ templates that contain patterns for dot-prefixed indices should also be avoided,\ - \ as these patterns are meant for internal use only. In a future Elasticsearch\ - \ version, creation of these dot-prefixed indices will no longer be allowed." - impact: "Requests performing an action that would create an index beginning with\ - \ a dot (indexing a document, manual creation, reindex), or creating an index\ - \ template with index patterns beginning with a dot, will contain a deprecation\ - \ header warning about dot-prefixed indices in the response." 
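For readers skimming this removal, here is a minimal sketch of the two request shapes the 112571 deprecation notice refers to; the index name, template name, and pattern below are hypothetical placeholders, not values taken from the PR. Either request would still succeed, but the response would carry the deprecation warning header about dot-prefixed indices described above.

[source,console]
----
# Hypothetical names for illustration; both requests would draw the
# dot-prefixed-index deprecation warning described in the changelog entry.
PUT /.my-internal-index

PUT /_index_template/dot-prefixed-template
{
  "index_patterns": [".internal-*"],
  "template": {
    "settings": { "number_of_replicas": 0 }
  }
}
----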
diff --git a/docs/changelog/112574.yaml b/docs/changelog/112574.yaml deleted file mode 100644 index 3111697a8b97f..0000000000000 --- a/docs/changelog/112574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112574 -summary: Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/112595.yaml b/docs/changelog/112595.yaml deleted file mode 100644 index 19ee0368475ae..0000000000000 --- a/docs/changelog/112595.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112595 -summary: Collect and display execution metadata for ES|QL cross cluster searches -area: ES|QL -type: enhancement -issues: - - 112402 diff --git a/docs/changelog/112612.yaml b/docs/changelog/112612.yaml deleted file mode 100644 index d6037e34ff171..0000000000000 --- a/docs/changelog/112612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112612 -summary: Set `replica_unassigned_buffer_time` in constructor -area: Health -type: bug -issues: [] diff --git a/docs/changelog/112645.yaml b/docs/changelog/112645.yaml deleted file mode 100644 index cf4ef4609a1f3..0000000000000 --- a/docs/changelog/112645.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112645 -summary: Add support for multi-value dimensions -area: Mapping -type: enhancement -issues: - - 110387 diff --git a/docs/changelog/112652.yaml b/docs/changelog/112652.yaml deleted file mode 100644 index c7ddcd4bffdc8..0000000000000 --- a/docs/changelog/112652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110399 -summary: "[Inference API] alibabacloud ai search service support chunk infer to support semantic_text field" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112665.yaml b/docs/changelog/112665.yaml deleted file mode 100644 index ae2cf7f171f4b..0000000000000 --- a/docs/changelog/112665.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 112665 -summary: Remove zstd feature flag for index codec best compression -area: Codec -type: enhancement -issues: [] -highlight: - title: Enable ZStandard compression for indices with index.codec set to best_compression - body: |- - Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to - best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with - index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a - similar indexing throughput depending on what options are used. Experiments with indexing logs have shown that - ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput compared to DEFLATE. 
- notable: true diff --git a/docs/changelog/112677.yaml b/docs/changelog/112677.yaml deleted file mode 100644 index 89662236c6ca5..0000000000000 --- a/docs/changelog/112677.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112677 -summary: Stream OpenAI Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112678.yaml b/docs/changelog/112678.yaml deleted file mode 100644 index 7a1a9d622a65f..0000000000000 --- a/docs/changelog/112678.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112678 -summary: Make "too many clauses" throw IllegalArgumentException to avoid 500s -area: Search -type: bug -issues: - - 112177 \ No newline at end of file diff --git a/docs/changelog/112687.yaml b/docs/changelog/112687.yaml deleted file mode 100644 index dd079e1b700c4..0000000000000 --- a/docs/changelog/112687.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112687 -summary: Add `TaskManager` to `pluginServices` -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/112706.yaml b/docs/changelog/112706.yaml deleted file mode 100644 index fc0f5c4c554a1..0000000000000 --- a/docs/changelog/112706.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112706 -summary: Configure keeping source in `FieldMapper` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112707.yaml b/docs/changelog/112707.yaml deleted file mode 100644 index 9f16cfcd2b6f2..0000000000000 --- a/docs/changelog/112707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112707 -summary: Deduplicate `BucketOrder` when deserializing -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112723.yaml b/docs/changelog/112723.yaml deleted file mode 100644 index dbee3232d1c75..0000000000000 --- a/docs/changelog/112723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112723 -summary: Improve DateTime error handling and add some bad date tests -area: Search -type: bug -issues: - - 112190 diff --git a/docs/changelog/112768.yaml b/docs/changelog/112768.yaml deleted file mode 100644 index 13d5b8eaae38f..0000000000000 --- a/docs/changelog/112768.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112768 -summary: Deduplicate Kuromoji User Dictionary -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112826.yaml b/docs/changelog/112826.yaml deleted file mode 100644 index 65c05b4d6035a..0000000000000 --- a/docs/changelog/112826.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112826 -summary: "Multi term intervals: increase max_expansions" -area: Search -type: enhancement -issues: - - 110491 diff --git a/docs/changelog/112850.yaml b/docs/changelog/112850.yaml deleted file mode 100644 index 97a8877f6291c..0000000000000 --- a/docs/changelog/112850.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112850 -summary: Fix synthetic source field names for multi-fields -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/112874.yaml b/docs/changelog/112874.yaml deleted file mode 100644 index 99ed9ed28fa0f..0000000000000 --- a/docs/changelog/112874.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112874 -summary: Reduce heap usage for `AggregatorsReducer` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112888.yaml b/docs/changelog/112888.yaml deleted file mode 100644 index 48806a491e531..0000000000000 --- a/docs/changelog/112888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112888 -summary: Fix `getDatabaseType` for unusual MMDBs -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/112895.yaml b/docs/changelog/112895.yaml deleted file mode 100644 index 59d391f649280..0000000000000 --- 
a/docs/changelog/112895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112895 -summary: (logger) change from error to warn for short circuiting user -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/112905.yaml b/docs/changelog/112905.yaml deleted file mode 100644 index aac0b7e9dfb59..0000000000000 --- a/docs/changelog/112905.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112905 -summary: "[ES|QL] Named parameter for field names and field name patterns" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112916.yaml b/docs/changelog/112916.yaml deleted file mode 100644 index 91dc7f332efc4..0000000000000 --- a/docs/changelog/112916.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112916 -summary: Allow out of range term queries for numeric types -area: Search -type: bug -issues: [] diff --git a/docs/changelog/112929.yaml b/docs/changelog/112929.yaml deleted file mode 100644 index e5f49897432de..0000000000000 --- a/docs/changelog/112929.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112929 -summary: "ES|QL: Add support for cached strings in plan serialization" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112933.yaml b/docs/changelog/112933.yaml deleted file mode 100644 index 222cd5aadf739..0000000000000 --- a/docs/changelog/112933.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112933 -summary: "Allow incubating Panama Vector in simdvec, and add vectorized `ipByteBin`" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112938.yaml b/docs/changelog/112938.yaml deleted file mode 100644 index 82b98871c3352..0000000000000 --- a/docs/changelog/112938.yaml +++ /dev/null @@ -1,35 +0,0 @@ -pr: 112938 -summary: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function -area: ES|QL -type: enhancement -issues: - - 109973 -highlight: - title: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function - body: |- - The most used and likely most valuable geospatial search query in Elasticsearch is the sorted proximity search, - finding items within a certain distance of a point of interest and sorting the results by distance. - This has been possible in ES|QL since 8.15.0, but the sorting was done in-memory, not pushed down to Lucene. - Now the sorting is pushed down to Lucene, which results in a significant performance improvement. - - Queries that perform both filtering and sorting on distance are supported. 
For example: - - [source,esql] - ---- - FROM test - | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(37.7749, -122.4194)")) - | WHERE distance < 1000000 - | SORT distance ASC, name DESC - | LIMIT 10 - ---- - - In addition, the support for sorting on EVAL expressions has been extended to cover references to fields: - - [source,esql] - ---- - FROM test - | EVAL ref = field - | SORT ref ASC - | LIMIT 10 - ---- - notable: false diff --git a/docs/changelog/112972.yaml b/docs/changelog/112972.yaml deleted file mode 100644 index 5332ac13fd13f..0000000000000 --- a/docs/changelog/112972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112972 -summary: "ILM: Add `total_shards_per_node` setting to searchable snapshot" -area: ILM+SLM -type: enhancement -issues: - - 112261 diff --git a/docs/changelog/112973.yaml b/docs/changelog/112973.yaml deleted file mode 100644 index 3ba86a31334ff..0000000000000 --- a/docs/changelog/112973.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112973 -summary: Fix verbose get data stream API not requiring extra privileges -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/113013.yaml b/docs/changelog/113013.yaml deleted file mode 100644 index 1cec31074e806..0000000000000 --- a/docs/changelog/113013.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113013 -summary: Account for `DelayedBucket` before reduction -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/113027.yaml b/docs/changelog/113027.yaml deleted file mode 100644 index 825740cf5691d..0000000000000 --- a/docs/changelog/113027.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113027 -summary: Retrieve the source for objects and arrays in a separate parsing phase -area: Mapping -type: bug -issues: - - 112374 diff --git a/docs/changelog/113051.yaml b/docs/changelog/113051.yaml deleted file mode 100644 index 9be68f9f2b03e..0000000000000 --- a/docs/changelog/113051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113051 -summary: Add Search Inference ID To Semantic Text Mapping -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113103.yaml b/docs/changelog/113103.yaml deleted file mode 100644 index 2ed98e0907bae..0000000000000 --- a/docs/changelog/113103.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113103 -summary: "ESQL: Align year diffing to the rest of the units in DATE_DIFF: chronological" -area: ES|QL -type: bug -issues: - - 112482 diff --git a/docs/changelog/113143.yaml b/docs/changelog/113143.yaml deleted file mode 100644 index 4a2044cca0ce4..0000000000000 --- a/docs/changelog/113143.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113143 -summary: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 -area: Analysis -type: deprecation -issues: [] -deprecation: - title: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 - area: Analysis - details: kp, dutch_kp, dutchKp and lovins stemmers are deprecated and will be removed. - impact: These stemmers will be removed and will be no longer supported. diff --git a/docs/changelog/113158.yaml b/docs/changelog/113158.yaml deleted file mode 100644 index d097ea11b3a23..0000000000000 --- a/docs/changelog/113158.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113158 -summary: Adds a new Inference API for streaming responses back to the user. 
-area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113172.yaml b/docs/changelog/113172.yaml deleted file mode 100644 index 2d03196b0cfbd..0000000000000 --- a/docs/changelog/113172.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113172 -summary: "[ESQL] Add finish() elapsed time to aggregation profiling times" -area: ES|QL -type: enhancement -issues: - - 112950 diff --git a/docs/changelog/113183.yaml b/docs/changelog/113183.yaml deleted file mode 100644 index f30ce9831adb3..0000000000000 --- a/docs/changelog/113183.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113183 -summary: "ESQL: TOP support for strings" -area: ES|QL -type: feature -issues: - - 109849 diff --git a/docs/changelog/113187.yaml b/docs/changelog/113187.yaml deleted file mode 100644 index 397179c4bc3bb..0000000000000 --- a/docs/changelog/113187.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113187 -summary: Preserve Step Info Across ILM Auto Retries -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/113251.yaml b/docs/changelog/113251.yaml deleted file mode 100644 index 49167e6e4c915..0000000000000 --- a/docs/changelog/113251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113251 -summary: Span term query to convert to match no docs when unmapped field is targeted -area: Search -type: bug -issues: [] diff --git a/docs/changelog/113276.yaml b/docs/changelog/113276.yaml deleted file mode 100644 index 87241878b3ec4..0000000000000 --- a/docs/changelog/113276.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113276 -summary: Adding component template substitutions to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113280.yaml b/docs/changelog/113280.yaml deleted file mode 100644 index 1d8de0d87dd0d..0000000000000 --- a/docs/changelog/113280.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113280 -summary: Warn for model load failures if they have a status code <500 -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/113286.yaml b/docs/changelog/113286.yaml deleted file mode 100644 index eeffb10b4e638..0000000000000 --- a/docs/changelog/113286.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113286 -summary: Deprecate legacy params from range query -area: Search -type: deprecation -issues: [] -deprecation: - title: Deprecate legacy params from range query - area: REST API - details: Range query will not longer accept `to`, `from`, `include_lower`, and `include_upper` parameters. - impact: Instead use `gt`, `gte`, `lt` and `lte` parameters. 
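As a rough migration sketch for the 113286 deprecation (the index name `my-index`, the field `age`, and the bounds are made-up placeholders, not taken from the PR), the legacy parameters map onto the supported ones as follows: `from` with `include_lower: true` becomes `gte`, and `to` with `include_upper: false` becomes `lt`.

[source,console]
----
# Deprecated form using the legacy range parameters (hypothetical index and field)
GET /my-index/_search
{
  "query": {
    "range": {
      "age": { "from": 10, "to": 20, "include_lower": true, "include_upper": false }
    }
  }
}

# Equivalent form using the parameters that remain supported
GET /my-index/_search
{
  "query": {
    "range": {
      "age": { "gte": 10, "lt": 20 }
    }
  }
}
----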
diff --git a/docs/changelog/113297.yaml b/docs/changelog/113297.yaml deleted file mode 100644 index 476619f432639..0000000000000 --- a/docs/changelog/113297.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113297 -summary: "[ES|QL] add reverse function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113314.yaml b/docs/changelog/113314.yaml deleted file mode 100644 index c496ad3dd86f1..0000000000000 --- a/docs/changelog/113314.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113314 -summary: "[ES|QL] Check expression resolved before checking its data type in `ImplicitCasting`" -area: ES|QL -type: bug -issues: - - 113242 diff --git a/docs/changelog/113333.yaml b/docs/changelog/113333.yaml deleted file mode 100644 index c6a3584845729..0000000000000 --- a/docs/changelog/113333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113333 -summary: Upgrade to Lucene 9.12 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/113373.yaml b/docs/changelog/113373.yaml deleted file mode 100644 index cbb3829e03425..0000000000000 --- a/docs/changelog/113373.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113373 -summary: Implement `parseBytesRef` for `TimeSeriesRoutingHashFieldType` -area: TSDB -type: bug -issues: - - 112399 diff --git a/docs/changelog/113374.yaml b/docs/changelog/113374.yaml deleted file mode 100644 index f1d5750de0f60..0000000000000 --- a/docs/changelog/113374.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113374 -summary: Add ESQL match function -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/113385.yaml b/docs/changelog/113385.yaml deleted file mode 100644 index 9cee1ebcd4f64..0000000000000 --- a/docs/changelog/113385.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113385 -summary: Small performance improvement in h3 library -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/113387.yaml b/docs/changelog/113387.yaml deleted file mode 100644 index 4819404a55809..0000000000000 --- a/docs/changelog/113387.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113387 -summary: "Add `CircuitBreaker` to TDigest, Step 3: Connect with ESQL CB" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113498.yaml b/docs/changelog/113498.yaml deleted file mode 100644 index 93b21a1d171eb..0000000000000 --- a/docs/changelog/113498.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113498 -summary: Listing all available databases in the _ingest/geoip/database API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113499.yaml b/docs/changelog/113499.yaml deleted file mode 100644 index a4d7f28eb0de4..0000000000000 --- a/docs/changelog/113499.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113499 -summary: Fix synthetic source for flattened field when used with `ignore_above` -area: Logs -type: bug -issues: - - 112044 diff --git a/docs/changelog/113552.yaml b/docs/changelog/113552.yaml deleted file mode 100644 index 48f7da309e82e..0000000000000 --- a/docs/changelog/113552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113552 -summary: Tag redacted document in ingest metadata -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113570.yaml b/docs/changelog/113570.yaml deleted file mode 100644 index 8cfad9195c5cd..0000000000000 --- a/docs/changelog/113570.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113570 -summary: Fix `ignore_above` handling in synthetic source when index level setting - is used -area: Logs -type: bug -issues: - - 113538 diff --git a/docs/changelog/113588.yaml b/docs/changelog/113588.yaml deleted file mode 100644 index 
e797100443f54..0000000000000 --- a/docs/changelog/113588.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113588 -summary: Add asset criticality indices for `kibana_system_user` -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/113607.yaml b/docs/changelog/113607.yaml deleted file mode 100644 index eb25d2600a555..0000000000000 --- a/docs/changelog/113607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113607 -summary: Add more `dense_vector` details for cluster stats field stats -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113613.yaml b/docs/changelog/113613.yaml deleted file mode 100644 index 4b020333aaa36..0000000000000 --- a/docs/changelog/113613.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113613 -summary: "Add `CircuitBreaker` to TDigest, Step 4: Take into account shallow classes\ - \ size" -area: ES|QL -type: enhancement -issues: - - 113916 diff --git a/docs/changelog/113623.yaml b/docs/changelog/113623.yaml deleted file mode 100644 index 8587687d27080..0000000000000 --- a/docs/changelog/113623.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113623 -summary: "Adding chunking settings to `MistralService,` `GoogleAiStudioService,` and\ - \ `HuggingFaceService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113690.yaml b/docs/changelog/113690.yaml deleted file mode 100644 index bd5f1245f471e..0000000000000 --- a/docs/changelog/113690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113690 -summary: Add object param for keeping synthetic source -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113735.yaml b/docs/changelog/113735.yaml deleted file mode 100644 index 4f6579c7cb9e0..0000000000000 --- a/docs/changelog/113735.yaml +++ /dev/null @@ -1,28 +0,0 @@ -pr: 113735 -summary: "ESQL: Introduce per agg filter" -area: ES|QL -type: feature -issues: [] -highlight: - title: "ESQL: Introduce per agg filter" - body: |- - Add support for aggregation scoped filters that work dynamically on the - data in each group. - - [source,esql] - ---- - | STATS success = COUNT(*) WHERE 200 <= code AND code < 300, - redirect = COUNT(*) WHERE 300 <= code AND code < 400, - client_err = COUNT(*) WHERE 400 <= code AND code < 500, - server_err = COUNT(*) WHERE 500 <= code AND code < 600, - total_count = COUNT(*) - ---- - - Implementation wise, the base AggregateFunction has been extended to - allow a filter to be passed on. This is required to incorporate the - filter as part of the aggregate equality/identity which would fail with - the filter as an external component. - As part of the process, the serialization for the existing aggregations - had to be fixed so AggregateFunction implementations so that it - delegates to their parent first. 
- notable: true diff --git a/docs/changelog/113812.yaml b/docs/changelog/113812.yaml deleted file mode 100644 index 04498b4ae5f7e..0000000000000 --- a/docs/changelog/113812.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113812 -summary: Add Streaming Inference spec -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113816.yaml b/docs/changelog/113816.yaml deleted file mode 100644 index 8c7cf14e356b3..0000000000000 --- a/docs/changelog/113816.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113816 -summary: Avoid using concurrent collector manager in `LuceneChangesSnapshot` -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113825.yaml b/docs/changelog/113825.yaml deleted file mode 100644 index 6d4090fda7ed2..0000000000000 --- a/docs/changelog/113825.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 113825 -summary: Cross-cluster search telemetry -area: Search -type: feature -issues: [] -highlight: - title: Cross-cluster search telemetry - body: |- - The cross-cluster search telemetry is collected when cross-cluster searches - are performed, and is returned as "ccs" field in `_cluster/stats` output. - It also add a new parameter `include_remotes=true` to the `_cluster/stats` API - which will collect data from connected remote clusters. diff --git a/docs/changelog/113873.yaml b/docs/changelog/113873.yaml deleted file mode 100644 index ac52aaf94d518..0000000000000 --- a/docs/changelog/113873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113873 -summary: Default inference endpoint for ELSER -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113897.yaml b/docs/changelog/113897.yaml deleted file mode 100644 index db0c53518613c..0000000000000 --- a/docs/changelog/113897.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113897 -summary: "Add chunking settings configuration to `CohereService,` `AmazonBedrockService,`\ - \ and `AzureOpenAiService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113910.yaml b/docs/changelog/113910.yaml deleted file mode 100644 index aa9d3b61fe768..0000000000000 --- a/docs/changelog/113910.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113910 -summary: Do not expand dots when storing objects in ignored source -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/113911.yaml b/docs/changelog/113911.yaml deleted file mode 100644 index 5c2f93a6ea76a..0000000000000 --- a/docs/changelog/113911.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113911 -summary: Enable OpenAI Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113967.yaml b/docs/changelog/113967.yaml deleted file mode 100644 index 58b72eba49deb..0000000000000 --- a/docs/changelog/113967.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 113967 -summary: "ESQL: Entirely remove META FUNCTIONS" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ESQL: Entirely remove META FUNCTIONS" - area: ES|QL - details: | - Removes an undocumented syntax from ESQL: META FUNCTION. This was never - reliable or really useful. Consult the documentation instead. 
- impact: "Removes an undocumented syntax from ESQL: META FUNCTION" - notable: false diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml deleted file mode 100644 index 632ba038271bb..0000000000000 --- a/docs/changelog/113975.yaml +++ /dev/null @@ -1,19 +0,0 @@ -pr: 113975 -summary: JDK locale database change -area: Mapping -type: breaking -issues: [] -breaking: - title: JDK locale database change - area: Mapping - details: | - {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. - - If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. - impact: | - This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. - - Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. - - For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. - notable: true diff --git a/docs/changelog/113981.yaml b/docs/changelog/113981.yaml deleted file mode 100644 index 38f3a6f04ae46..0000000000000 --- a/docs/changelog/113981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113981 -summary: "Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,`\ - \ and `AlibabaCloudSearchService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113988.yaml b/docs/changelog/113988.yaml deleted file mode 100644 index d55e7eb2db326..0000000000000 --- a/docs/changelog/113988.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113988 -summary: Track search and fetch failure stats -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/113989.yaml b/docs/changelog/113989.yaml deleted file mode 100644 index 7bf50b52d9e07..0000000000000 --- a/docs/changelog/113989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113989 -summary: Add `max_multipart_parts` setting to S3 repository -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/114021.yaml b/docs/changelog/114021.yaml deleted file mode 100644 index e9dab5dce5685..0000000000000 --- a/docs/changelog/114021.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114021 -summary: "ESQL: Speed up grouping by bytes" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114080.yaml b/docs/changelog/114080.yaml deleted file mode 100644 index 395768c46369a..0000000000000 --- a/docs/changelog/114080.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114080 -summary: Stream Cohere Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114109.yaml b/docs/changelog/114109.yaml deleted file mode 100644 index ce51ed50f724c..0000000000000 --- a/docs/changelog/114109.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114109 -summary: Update 
cluster stats for retrievers -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/114128.yaml b/docs/changelog/114128.yaml deleted file mode 100644 index 721649d0d6fe0..0000000000000 --- a/docs/changelog/114128.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114128 -summary: Adding `index_template_substitutions` to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114157.yaml b/docs/changelog/114157.yaml deleted file mode 100644 index 22e0fda173e98..0000000000000 --- a/docs/changelog/114157.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114157 -summary: Add a `terminate` ingest processor -area: Ingest Node -type: feature -issues: - - 110218 diff --git a/docs/changelog/114168.yaml b/docs/changelog/114168.yaml deleted file mode 100644 index 58f1ab7110e7d..0000000000000 --- a/docs/changelog/114168.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114168 -summary: Add a query rules tester API call -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/114234.yaml b/docs/changelog/114234.yaml deleted file mode 100644 index 0f77ada794bee..0000000000000 --- a/docs/changelog/114234.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114234 -summary: Prevent flattening of ordered and unordered interval sources -area: Search -type: bug -issues: [] diff --git a/docs/changelog/114271.yaml b/docs/changelog/114271.yaml deleted file mode 100644 index 7b47b922ff811..0000000000000 --- a/docs/changelog/114271.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114271 -summary: "[ES|QL] Skip validating remote cluster index names in parser" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114295.yaml b/docs/changelog/114295.yaml deleted file mode 100644 index 2acdc293a206c..0000000000000 --- a/docs/changelog/114295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114295 -summary: "Reprocess operator file settings when settings service starts, due to node restart or master node change" -area: Infra/Settings -type: enhancement -issues: [ ] diff --git a/docs/changelog/114309.yaml b/docs/changelog/114309.yaml deleted file mode 100644 index bcd1262062943..0000000000000 --- a/docs/changelog/114309.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114309 -summary: Upgrade to AWS SDK v2 -area: Machine Learning -type: enhancement -issues: - - 110590 diff --git a/docs/changelog/114321.yaml b/docs/changelog/114321.yaml deleted file mode 100644 index 286a72cfee840..0000000000000 --- a/docs/changelog/114321.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114321 -summary: Stream Anthropic Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114358.yaml b/docs/changelog/114358.yaml deleted file mode 100644 index 972bc5bfdbe1c..0000000000000 --- a/docs/changelog/114358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114358 -summary: "ESQL: Use less memory in listener" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114363.yaml b/docs/changelog/114363.yaml deleted file mode 100644 index 51ca9ed34a7ca..0000000000000 --- a/docs/changelog/114363.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114363 -summary: Give the kibana system user permission to read security entities -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/114368.yaml b/docs/changelog/114368.yaml deleted file mode 100644 index 6c6e215a1bd49..0000000000000 --- a/docs/changelog/114368.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114368 -summary: "ESQL: Delay construction of warnings" -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/114375.yaml 
b/docs/changelog/114375.yaml deleted file mode 100644 index 7ff7cc60b34ba..0000000000000 --- a/docs/changelog/114375.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114375 -summary: Handle `InternalSendException` inline for non-forking handlers -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/114382.yaml b/docs/changelog/114382.yaml deleted file mode 100644 index 9f572e14f4737..0000000000000 --- a/docs/changelog/114382.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114382 -summary: "[ES|QL] Add hypot function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114386.yaml b/docs/changelog/114386.yaml deleted file mode 100644 index cf9edda9de21e..0000000000000 --- a/docs/changelog/114386.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114386 -summary: Improve handling of failure to create persistent task -area: Task Management -type: bug -issues: [] diff --git a/docs/changelog/114389.yaml b/docs/changelog/114389.yaml deleted file mode 100644 index f56b165bc917e..0000000000000 --- a/docs/changelog/114389.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114389 -summary: Filter empty task settings objects from the API response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114411.yaml b/docs/changelog/114411.yaml deleted file mode 100644 index 23bff3c8e25ba..0000000000000 --- a/docs/changelog/114411.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114411 -summary: "ESQL: Push down filters even in case of renames in Evals" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114429.yaml b/docs/changelog/114429.yaml deleted file mode 100644 index 56b0ffe7b43fb..0000000000000 --- a/docs/changelog/114429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114429 -summary: Add chunking settings configuration to `ElasticsearchService/ELSER` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114439.yaml b/docs/changelog/114439.yaml deleted file mode 100644 index fd097d02f885f..0000000000000 --- a/docs/changelog/114439.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114439 -summary: Adding new bbq index types behind a feature flag -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/114453.yaml b/docs/changelog/114453.yaml deleted file mode 100644 index 0d5345ad9d2a6..0000000000000 --- a/docs/changelog/114453.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114453 -summary: Switch default chunking strategy to sentence -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114457.yaml b/docs/changelog/114457.yaml deleted file mode 100644 index 9558c41852f69..0000000000000 --- a/docs/changelog/114457.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114457 -summary: "[Inference API] Introduce Update API to change some aspects of existing\ - \ inference endpoints" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114464.yaml b/docs/changelog/114464.yaml deleted file mode 100644 index 5f5ee816aa28d..0000000000000 --- a/docs/changelog/114464.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114464 -summary: Stream Azure Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114512.yaml b/docs/changelog/114512.yaml deleted file mode 100644 index 10dea3a2cbac1..0000000000000 --- a/docs/changelog/114512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114512 -summary: Ensure clean thread context in `MasterService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/114527.yaml b/docs/changelog/114527.yaml deleted file mode 100644 
index 74d95edcd1a1d..0000000000000 --- a/docs/changelog/114527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114527 -summary: Verify Maxmind database types in the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114549.yaml b/docs/changelog/114549.yaml deleted file mode 100644 index a6bdbba93876b..0000000000000 --- a/docs/changelog/114549.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114549 -summary: Send mid-stream errors to users -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114552.yaml b/docs/changelog/114552.yaml deleted file mode 100644 index 00e2f95b5038d..0000000000000 --- a/docs/changelog/114552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114552 -summary: Improve exception message for bad environment variable placeholders in settings -area: Infra/Settings -type: enhancement -issues: [110858] diff --git a/docs/changelog/114596.yaml b/docs/changelog/114596.yaml deleted file mode 100644 index a36978dcacd8c..0000000000000 --- a/docs/changelog/114596.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114596 -summary: Stream Google Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114638.yaml b/docs/changelog/114638.yaml deleted file mode 100644 index 0386aacfe3e18..0000000000000 --- a/docs/changelog/114638.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 114638 -summary: "ES|QL: Restrict sorting for `_source` and counter field types" -area: ES|QL -type: bug -issues: - - 114423 - - 111976 diff --git a/docs/changelog/114683.yaml b/docs/changelog/114683.yaml deleted file mode 100644 index a677e65a12b0e..0000000000000 --- a/docs/changelog/114683.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114683 -summary: Default inference endpoint for the multilingual-e5-small model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114715.yaml b/docs/changelog/114715.yaml deleted file mode 100644 index 0894cb2fa42ca..0000000000000 --- a/docs/changelog/114715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114715 -summary: Ignore unrecognized openai sse fields -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114719.yaml b/docs/changelog/114719.yaml deleted file mode 100644 index 477d656d5b979..0000000000000 --- a/docs/changelog/114719.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114719 -summary: Wait for allocation on scale up -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114732.yaml b/docs/changelog/114732.yaml deleted file mode 100644 index 42176cdbda443..0000000000000 --- a/docs/changelog/114732.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114732 -summary: Stream Bedrock Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114750.yaml b/docs/changelog/114750.yaml deleted file mode 100644 index f7a3c8c283934..0000000000000 --- a/docs/changelog/114750.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114750 -summary: Create an ml node inference endpoint referencing an existing model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114774.yaml b/docs/changelog/114774.yaml deleted file mode 100644 index 1becfe427fda0..0000000000000 --- a/docs/changelog/114774.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114774 -summary: "ESQL: Add support for multivalue fields in Arrow output" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114784.yaml b/docs/changelog/114784.yaml deleted file mode 100644 index 24ebe8b5fc09a..0000000000000 --- a/docs/changelog/114784.yaml +++ /dev/null @@ 
-1,5 +0,0 @@ -pr: 114784 -summary: "[ES|QL] make named parameter for identifier and pattern snapshot" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114836.yaml b/docs/changelog/114836.yaml deleted file mode 100644 index 6f21d3bfb9327..0000000000000 --- a/docs/changelog/114836.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114836 -summary: Support multi-valued fields in compute engine for ST_DISTANCE -area: ES|QL -type: enhancement -issues: - - 112910 diff --git a/docs/changelog/114848.yaml b/docs/changelog/114848.yaml deleted file mode 100644 index db41e8496f787..0000000000000 --- a/docs/changelog/114848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114848 -summary: "ESQL: Fix grammar changes around per agg filtering" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114854.yaml b/docs/changelog/114854.yaml deleted file mode 100644 index 144a10ba85043..0000000000000 --- a/docs/changelog/114854.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 114854 -summary: Adding deprecation warnings for rrf using rank and `sub_searches` -area: Search -type: deprecation -issues: [] -deprecation: - title: Adding deprecation warnings for rrf using rank and `sub_searches` - area: REST API - details: Search API parameter `sub_searches` will no longer be a supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer though the `rank` parameter - impact: Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter. diff --git a/docs/changelog/114856.yaml b/docs/changelog/114856.yaml deleted file mode 100644 index da7fae3ee18ea..0000000000000 --- a/docs/changelog/114856.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114856 -summary: "OTel mappings: avoid metrics to be rejected when attributes are malformed" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/114888.yaml b/docs/changelog/114888.yaml deleted file mode 100644 index 6b99eb82d10f3..0000000000000 --- a/docs/changelog/114888.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114888 -summary: Fix ST_CENTROID_AGG when no records are aggregated -area: ES|QL -type: bug -issues: - - 106025 diff --git a/docs/changelog/114951.yaml b/docs/changelog/114951.yaml deleted file mode 100644 index 4d40a063e2b02..0000000000000 --- a/docs/changelog/114951.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114951 -summary: Expose cluster-state role mappings in APIs -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/114990.yaml b/docs/changelog/114990.yaml deleted file mode 100644 index 2575942d15bf5..0000000000000 --- a/docs/changelog/114990.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114990 -summary: Allow for querries on `_tier` to skip shards in the `can_match` phase -area: Search -type: bug -issues: - - 114910 diff --git a/docs/changelog/115031.yaml b/docs/changelog/115031.yaml deleted file mode 100644 index d8d6e1a3f8166..0000000000000 --- a/docs/changelog/115031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115031 -summary: Bool query early termination should also consider `must_not` clauses -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/115048.yaml b/docs/changelog/115048.yaml deleted file mode 100644 index 10844b83c6d01..0000000000000 --- a/docs/changelog/115048.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115048 -summary: Add timeout and cancellation check to rescore phase -area: Ranking -type: enhancement -issues: [] diff --git 
a/docs/changelog/115061.yaml b/docs/changelog/115061.yaml deleted file mode 100644 index 7d40d5ae2629e..0000000000000 --- a/docs/changelog/115061.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115061 -summary: "[ES|QL] Simplify syntax of named parameter for identifier and pattern" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml deleted file mode 100644 index de2defcd46afd..0000000000000 --- a/docs/changelog/115117.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115117 -summary: Report JVM stats for all memory pools (97046) -area: Infra/Core -type: bug -issues: - - 97046 diff --git a/docs/changelog/115147.yaml b/docs/changelog/115147.yaml deleted file mode 100644 index 36f40bba1da17..0000000000000 --- a/docs/changelog/115147.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115147 -summary: Fix IPinfo geolocation schema -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/115194.yaml b/docs/changelog/115194.yaml deleted file mode 100644 index 0b201b9f89aa5..0000000000000 --- a/docs/changelog/115194.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115194 -summary: Update APM Java Agent to support JDK 23 -area: Infra/Metrics -type: upgrade -issues: - - 115101 - - 115100 diff --git a/docs/changelog/115245.yaml b/docs/changelog/115245.yaml deleted file mode 100644 index 294328567c3aa..0000000000000 --- a/docs/changelog/115245.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 115245 -summary: "ESQL: Fix `REVERSE` with backspace character" -area: ES|QL -type: bug -issues: - - 114372 - - 115227 - - 115228 diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml deleted file mode 100644 index acf6bbc69c36c..0000000000000 --- a/docs/changelog/115312.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115312 -summary: "ESQL: Fix filtered grouping on ords" -area: ES|QL -type: bug -issues: - - 114897 diff --git a/docs/changelog/115317.yaml b/docs/changelog/115317.yaml deleted file mode 100644 index 153f7a52f0674..0000000000000 --- a/docs/changelog/115317.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115317 -summary: Revert "Add `ResolvedExpression` wrapper" -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml deleted file mode 100644 index 9f69657a5d167..0000000000000 --- a/docs/changelog/115399.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 115399 -summary: Adding breaking change entry for retrievers -area: Search -type: breaking -issues: [] -breaking: - title: Reworking RRF retriever to be evaluated during rewrite phase - area: REST API - details: |- - In this release (8.16), we have introduced major changes to the retrievers framework - and how they can be evaluated, focusing mainly on compound retrievers - like `rrf` and `text_similarity_reranker`, which allowed us to support full - composability (i.e. any retriever can be nested under any compound retriever), - as well as supporting additional search features like collapsing, explaining, - aggregations, and highlighting. - - To ensure consistency, and given that this rework is not available until 8.16, - `rrf` and `text_similarity_reranker` retriever queries would now - throw an exception in a mixed cluster scenario, where there are nodes - both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. - - As part of the rework, we have also removed the `_rank` property from - the responses of an `rrf` retriever. 
- impact: |- - - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario - with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. - - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field - will throw an exception - notable: false diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml deleted file mode 100644 index e443b152955f3..0000000000000 --- a/docs/changelog/115404.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115404 -summary: Fix NPE in Get Deployment Stats -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml deleted file mode 100644 index ddf3c69183000..0000000000000 --- a/docs/changelog/115429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115429 -summary: "[otel-data] Add more kubernetes aliases" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml deleted file mode 100644 index 91a6089dfb3ce..0000000000000 --- a/docs/changelog/115594.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115594 -summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative - long values -area: Search -type: bug -issues: [] diff --git a/docs/changelog/115624.yaml b/docs/changelog/115624.yaml deleted file mode 100644 index 1992ed65679ca..0000000000000 --- a/docs/changelog/115624.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115624 -summary: "ES|QL: fix LIMIT pushdown past MV_EXPAND" -area: ES|QL -type: bug -issues: - - 102084 - - 102061 diff --git a/docs/changelog/115656.yaml b/docs/changelog/115656.yaml deleted file mode 100644 index 13b612b052fc1..0000000000000 --- a/docs/changelog/115656.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115656 -summary: Fix stream support for `TaskType.ANY` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115715.yaml b/docs/changelog/115715.yaml deleted file mode 100644 index 378f2c42e5e50..0000000000000 --- a/docs/changelog/115715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115715 -summary: Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115811.yaml b/docs/changelog/115811.yaml deleted file mode 100644 index 292dc91ecb928..0000000000000 --- a/docs/changelog/115811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115811 -summary: "Prohibit changes to index mode, source, and sort settings during restore" -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/115823.yaml b/docs/changelog/115823.yaml deleted file mode 100644 index a6119e0fa56e4..0000000000000 --- a/docs/changelog/115823.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115823 -summary: Add ECK Role Mapping Cleanup -area: Security -type: bug -issues: [] diff --git a/docs/changelog/115868.yaml b/docs/changelog/115868.yaml deleted file mode 100644 index abe6a63c3a4d8..0000000000000 --- a/docs/changelog/115868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115868 -summary: Forward bedrock connection errors to user -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115952.yaml b/docs/changelog/115952.yaml deleted file mode 100644 index ec57a639dc0ae..0000000000000 --- a/docs/changelog/115952.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115952 -summary: "ESQL: Fix a bug in VALUES agg" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/116015.yaml b/docs/changelog/116015.yaml deleted file mode 100644 index 
693fad639f2fa..0000000000000 --- a/docs/changelog/116015.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116015 -summary: Empty percentile results no longer throw no_such_element_exception in Anomaly Detection jobs -area: Machine Learning -type: bug -issues: - - 116013 diff --git a/docs/changelog/116086.yaml b/docs/changelog/116086.yaml deleted file mode 100644 index 73ad77d637a46..0000000000000 --- a/docs/changelog/116086.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116086 -summary: "ESQL: Fix DEBUG log of filter" -area: ES|QL -type: bug -issues: - - 116055 diff --git a/docs/changelog/116212.yaml b/docs/changelog/116212.yaml deleted file mode 100644 index 7c8756f4054cd..0000000000000 --- a/docs/changelog/116212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116212 -summary: Handle status code 0 in S3 CMU response -area: Snapshot/Restore -type: bug -issues: - - 102294 diff --git a/docs/changelog/116266.yaml b/docs/changelog/116266.yaml deleted file mode 100644 index 1fcc0c310962d..0000000000000 --- a/docs/changelog/116266.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116266 -summary: Align dot prefix validation with Serverless -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/116274.yaml b/docs/changelog/116274.yaml deleted file mode 100644 index 9d506c7725afd..0000000000000 --- a/docs/changelog/116274.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116274 -summary: "[ES|QL] Verify aggregation filter's type is boolean to avoid `class_cast_exception`" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/116431.yaml b/docs/changelog/116431.yaml new file mode 100644 index 0000000000000..50c6baf1d01c7 --- /dev/null +++ b/docs/changelog/116431.yaml @@ -0,0 +1,5 @@ +pr: 116431 +summary: Adds support for `input_type` field to Vertex inference service +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/116664.yaml b/docs/changelog/116664.yaml new file mode 100644 index 0000000000000..36915fca39731 --- /dev/null +++ b/docs/changelog/116664.yaml @@ -0,0 +1,6 @@ +pr: 116664 +summary: Hides `hugging_face_elser` service from the `GET _inference/_services API` +area: Machine Learning +type: bug +issues: + - 116644 diff --git a/docs/reference/connector/docs/_connectors-overview-table.asciidoc b/docs/reference/connector/docs/_connectors-overview-table.asciidoc index f25ea3deceeee..f5f8103349dde 100644 --- a/docs/reference/connector/docs/_connectors-overview-table.asciidoc +++ b/docs/reference/connector/docs/_connectors-overview-table.asciidoc @@ -44,7 +44,7 @@ NOTE: All connectors are available as self-managed <>|*GA*|8.12+|8.12+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/salesforce.py[View code] |<>|*GA*|8.10+|8.10+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/servicenow.py[View code] |<>|*GA*|8.9+|8.9+|8.9+|8.9+|8.9+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_online.py[View code] -|<>|*Beta*|8.15+|-|8.11+|8.13+|8.14+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] +|<>|*Beta*|8.15+|-|8.11+|8.13+|8.15+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] |<>|*Preview*|8.14+|-|-|-|-|https://github.com/elastic/connectors/tree/main/connectors/sources/slack.py[View code] |<>|*Preview*|8.14+|-|-|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/teams.py[View code] 
|<>|*Preview*|8.14+|-|8.11+|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/zoom.py[View code] diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 95ff8223b4d20..21d0890e436c5 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -87,14 +87,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -138,7 +140,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions @@ -546,14 +548,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -597,7 +601,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions diff --git a/docs/reference/connector/docs/connectors-sharepoint.asciidoc b/docs/reference/connector/docs/connectors-sharepoint.asciidoc index f5590daa1e701..d7a2307a9db80 100644 --- a/docs/reference/connector/docs/connectors-sharepoint.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint.asciidoc @@ -67,6 +67,9 @@ The following SharePoint Server versions are compatible: The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -133,7 +136,7 @@ The connector syncs the following SharePoint object types: [NOTE] ==== * Content from files bigger than 10 MB won't be extracted by default. Use the <> to handle larger binary files. -* Permissions are not synced. 
**All documents** indexed to an Elastic deployment will be visible to **all users with access** to that Elasticsearch Index. +* Permissions are not synced by default. Enable <> to sync permissions. ==== [discrete#es-connectors-sharepoint-sync-types] @@ -191,7 +194,7 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== @@ -254,6 +257,9 @@ Once connected, you'll be able to update these values in Kibana. The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -408,5 +414,5 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index 560e8b74cdd2c..37f49f2445770 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -102,7 +102,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index a1ab892330e67..d23b9f06e2d87 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -105,7 +105,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. @@ -176,21 +178,29 @@ POST /_security/role/cli_or_drivers_minimal -------------------------------------------------- // end::sql-queries-permission[] -The following example configures a role with remote indices privileges on a remote cluster: +The following example configures a role with remote indices and remote cluster privileges for a remote cluster: [source,console] -------------------------------------------------- -POST /_security/role/role_with_remote_indices +POST /_security/role/only_remote_access_role { "remote_indices": [ { - "clusters": [ "my_remote" ], <1> + "clusters": ["my_remote"], <1> "names": ["logs*"], <2> "privileges": ["read", "read_cross_cluster", "view_index_metadata"] <3> } + ], + "remote_cluster": [ + { + "clusters": ["my_remote"], <1> + "privileges": ["monitor_stats"] <4> + } ] } -------------------------------------------------- -<1> The remote indices privileges apply to remote cluster with the alias `my_remote`. -<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster ( `my_remote`). +<1> The remote indices and remote cluster privileges apply to remote cluster with the alias `my_remote`. +<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster (`my_remote`). 
<3> The actual <> granted for `logs*` on `my_remote`. +<4> The actual <> granted for `my_remote`. +Note - only a subset of the cluster privileges are supported for remote clusters. diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 535d70cbc5e9c..0c3f520605f07 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -249,12 +249,10 @@ The following describes the structure of a remote cluster permissions entry: <> and <>. This field is required. <2> The cluster level privileges for the remote cluster. The allowed values here are a subset of the -<>. This field is required. +<>. +The <> can be used to determine +which privileges are allowed here. This field is required. -The `monitor_enrich` privilege for remote clusters was introduced in version -8.15.0. Currently, this is the only privilege available for remote clusters and -is required to enable users to use the `ENRICH` keyword in ES|QL queries across -clusters. ==== Example diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 747b1eef40441..3b69e5c1ba984 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -250,6 +250,11 @@ Privileges to list and view details on existing repositories and snapshots. + This privilege is not available in {serverless-full}. +`monitor_stats`:: +Privileges to list and view details of stats. ++ +This privilege is not available in {serverless-full}. + `monitor_text_structure`:: All read-only operations related to the <>. + diff --git a/muted-tests.yml b/muted-tests.yml index ddd806d49ae5f..f2ca6e3d00424 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -192,9 +192,6 @@ tests: - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/116249 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} - issue: https://github.com/elastic/elasticsearch/issues/116332 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 34170d7c0f747..e45555b1dec19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -99,7 +99,11 @@ public void testBulkWithWriteIndexAndRouting() { // allowing the auto-generated timestamp to externally be set would allow making the index inconsistent with duplicate docs public void testExternallySetAutoGeneratedTimestamp() { IndexRequest indexRequest = new IndexRequest("index1").source(Collections.singletonMap("foo", "baz")); - indexRequest.autoGenerateId(); + if (randomBoolean()) { + indexRequest.autoGenerateId(); + } else { + indexRequest.autoGenerateTimeBasedId(); + } if (randomBoolean()) { indexRequest.id("test"); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index bfe46dc4c90f2..36374f7a3a8eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -53,7 +53,7 @@ public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() thro } } - public void testDesiredBalanceNodeWeightMetrics() { + public void testDesiredBalanceMetrics() { internalCluster().startNodes(2); prepareCreate("test").setSettings(indexSettings(2, 1)).get(); indexRandom(randomBoolean(), "test", between(50, 100)); @@ -68,38 +68,83 @@ public void testDesiredBalanceNodeWeightMetrics() { var nodeIds = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); var nodeNames = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getName).collect(Collectors.toSet()); - final var nodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME ); - assertThat(nodeWeightsMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWeightsMetrics) { + assertThat(desiredBalanceNodeWeightsMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWeightsMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + final var desiredBalanceNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME ); - assertThat(nodeShardCountMetrics.size(), equalTo(2)); - for (var nodeStat : nodeShardCountMetrics) { + assertThat(desiredBalanceNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeShardCountMetrics) { assertThat(nodeStat.value().longValue(), equalTo(2L)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME ); - assertThat(nodeWriteLoadMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWriteLoadMetrics) { + assertThat(desiredBalanceNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWriteLoadMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( 
DesiredBalanceMetrics.DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME ); - assertThat(nodeDiskUsageMetrics.size(), equalTo(2)); - for (var nodeStat : nodeDiskUsageMetrics) { + assertThat(desiredBalanceNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeDiskUsageMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeShardCountMetrics) { + assertThat(nodeStat.value().longValue(), equalTo(2L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME + ); + assertThat(currentNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeWriteLoadMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeDiskUsageMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeUndesiredShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeUndesiredShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeUndesiredShardCountMetrics) { + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeForecastedDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeForecastedDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeForecastedDiskUsageMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); @@ -136,6 +181,17 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME), matcher ); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME), matcher); + 
assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME), matcher); + assertThat( + testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME), + matcher + ); + assertThat( + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME), + matcher + ); } private static TestTelemetryPlugin getTelemetryPlugin(String nodeName) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 17d9d99cdd4b5..3815d1bba18c3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -193,7 +193,8 @@ static TransportVersion def(int id) { public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); - public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_790_00_0); + public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0); + public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_791_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 909d733fd3719..7791ca200a785 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -187,8 +187,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); - public static final Version V_8_15_5 = new Version(8_15_05_99); public static final Version V_8_16_0 = new Version(8_16_00_99); + public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_9_0_0 = new Version(9_00_00_99); public static final Version CURRENT = V_9_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index d0785a60dd0f5..c0811e7424b0d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -51,6 +51,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -76,6 +77,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; + private static final Supplier ID_GENERATOR = 
UUIDs::base64UUID; + private static final Supplier K_SORTED_TIME_BASED_ID_GENERATOR = UUIDs::base64TimeBasedKOrderedUUID; + /** * Max length of the source document to include into string() * @@ -692,10 +696,18 @@ public void process(IndexRouting indexRouting) { * request compatible with the append-only optimization. */ public void autoGenerateId() { - assert id == null; - assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!"; - assert ifSeqNo == UNASSIGNED_SEQ_NO; - assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; + assertBeforeGeneratingId(); + autoGenerateTimestamp(); + id(ID_GENERATOR.get()); + } + + public void autoGenerateTimeBasedId() { + assertBeforeGeneratingId(); + autoGenerateTimestamp(); + id(K_SORTED_TIME_BASED_ID_GENERATOR.get()); + } + + private void autoGenerateTimestamp() { /* * Set the auto generated timestamp so the append only optimization * can quickly test if this request *must* be unique without reaching @@ -704,8 +716,13 @@ public void autoGenerateId() { * never work before 1970, but that's ok. It's after 1970. */ autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); - String uid = UUIDs.base64UUID(); - id(uid); + } + + private void assertBeforeGeneratingId() { + assert id == null; + assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!"; + assert ifSeqNo == UNASSIGNED_SEQ_NO; + assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 0383bbb9bd401..046f4b6b0b251 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; @@ -138,6 +139,7 @@ public ClusterModule( this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); + var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster); this.shardsAllocator = createShardsAllocator( settings, clusterService.getClusterSettings(), @@ -146,7 +148,8 @@ public ClusterModule( clusterService, this::reconcile, writeLoadForecaster, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -160,7 +163,12 @@ public ClusterModule( ); this.allocationService.addAllocFailuresResetListenerTo(clusterService); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); - this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); + 
this.allocationStatsService = new AllocationStatsService( + clusterService, + clusterInfoService, + shardsAllocator, + nodeAllocationStatsProvider + ); this.telemetryProvider = telemetryProvider; } @@ -400,7 +408,8 @@ private static ShardsAllocator createShardsAllocator( ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, WriteLoadForecaster writeLoadForecaster, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -412,7 +421,8 @@ private static ShardsAllocator createShardsAllocator( threadPool, clusterService, reconciler, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ) ); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 3fb3c182f89cd..1c89d3bf259b5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.transport.Transports; @@ -147,11 +149,15 @@ public void checkIndexSplitAllowed() {} private abstract static class IdAndRoutingOnly extends IndexRouting { private final boolean routingRequired; + private final IndexVersion creationVersion; + private final IndexMode indexMode; IdAndRoutingOnly(IndexMetadata metadata) { super(metadata); + this.creationVersion = metadata.getCreationVersion(); MappingMetadata mapping = metadata.mapping(); this.routingRequired = mapping == null ? 
false : mapping.routingRequired(); + this.indexMode = metadata.getIndexMode(); } protected abstract int shardId(String id, @Nullable String routing); @@ -161,7 +167,11 @@ public void process(IndexRequest indexRequest) { // generate id if not already provided final String id = indexRequest.id(); if (id == null) { - indexRequest.autoGenerateId(); + if (creationVersion.onOrAfter(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID) && indexMode == IndexMode.LOGSDB) { + indexRequest.autoGenerateTimeBasedId(); + } else { + indexRequest.autoGenerateId(); + } } else if (id.isEmpty()) { throw new IllegalArgumentException("if _id is specified it must not be empty"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java index 3651f560e6dde..0c82faaaeaa45 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -10,86 +10,35 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.Maps; import java.util.Map; +import java.util.function.Supplier; public class AllocationStatsService { - private final ClusterService clusterService; private final ClusterInfoService clusterInfoService; - private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; - private final WriteLoadForecaster writeLoadForecaster; + private final Supplier desiredBalanceSupplier; + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; public AllocationStatsService( ClusterService clusterService, ClusterInfoService clusterInfoService, ShardsAllocator shardsAllocator, - WriteLoadForecaster writeLoadForecaster + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.clusterService = clusterService; this.clusterInfoService = clusterInfoService; - this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; - this.writeLoadForecaster = writeLoadForecaster; + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; + this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator + ? allocator::getDesiredBalance + : () -> null; } public Map stats() { - var state = clusterService.state(); - var info = clusterInfoService.getClusterInfo(); - var desiredBalance = desiredBalanceShardsAllocator != null ? 
desiredBalanceShardsAllocator.getDesiredBalance() : null; - - var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); - for (RoutingNode node : state.getRoutingNodes()) { - int shards = 0; - int undesiredShards = 0; - double forecastedWriteLoad = 0.0; - long forecastedDiskUsage = 0; - long currentDiskUsage = 0; - for (ShardRouting shardRouting : node) { - if (shardRouting.relocating()) { - continue; - } - shards++; - IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); - if (isDesiredAllocation(desiredBalance, shardRouting) == false) { - undesiredShards++; - } - long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); - forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); - currentDiskUsage += shardSize; - - } - stats.put( - node.nodeId(), - new NodeAllocationStats( - shards, - desiredBalanceShardsAllocator != null ? undesiredShards : -1, - forecastedWriteLoad, - forecastedDiskUsage, - currentDiskUsage - ) - ); - } - - return stats; - } - - private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { - if (desiredBalance == null) { - return true; - } - var assignment = desiredBalance.getAssignment(shardRouting.shardId()); - if (assignment == null) { - return false; - } - return assignment.nodeIds().contains(shardRouting.currentNodeId()); + return nodeAllocationStatsProvider.stats(clusterService.state(), clusterInfoService.getClusterInfo(), desiredBalanceSupplier.get()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java new file mode 100644 index 0000000000000..157b409be14d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +public class NodeAllocationStatsProvider { + private final WriteLoadForecaster writeLoadForecaster; + + public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster) { + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + var stats = Maps.newMapWithExpectedSize(clusterState.getRoutingNodes().size()); + for (RoutingNode node : clusterState.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = clusterState.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = clusterInfo.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalance != null ? 
undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 682dc85ccd00f..3b22221ea7db4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.time.TimeProvider; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; @@ -37,7 +38,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.function.LongSupplier; import java.util.function.Predicate; import static java.util.stream.Collectors.toUnmodifiableSet; @@ -50,7 +50,7 @@ public class DesiredBalanceComputer { private static final Logger logger = LogManager.getLogger(DesiredBalanceComputer.class); private final ShardsAllocator delegateAllocator; - private final LongSupplier timeSupplierMillis; + private final TimeProvider timeProvider; // stats protected final MeanMetric iterations = new MeanMetric(); @@ -73,9 +73,9 @@ public class DesiredBalanceComputer { private TimeValue progressLogInterval; private long maxBalanceComputationTimeDuringIndexCreationMillis; - public DesiredBalanceComputer(ClusterSettings clusterSettings, LongSupplier timeSupplierMillis, ShardsAllocator delegateAllocator) { + public DesiredBalanceComputer(ClusterSettings clusterSettings, TimeProvider timeProvider, ShardsAllocator delegateAllocator) { this.delegateAllocator = delegateAllocator; - this.timeSupplierMillis = timeSupplierMillis; + this.timeProvider = timeProvider; clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value); clusterSettings.initializeAndWatch( MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING, @@ -275,7 +275,7 @@ public DesiredBalance compute( final int iterationCountReportInterval = computeIterationCountReportInterval(routingAllocation); final long timeWarningInterval = progressLogInterval.millis(); - final long computationStartedTime = timeSupplierMillis.getAsLong(); + final long computationStartedTime = timeProvider.relativeTimeInMillis(); long nextReportTime = computationStartedTime + timeWarningInterval; int i = 0; @@ -323,7 +323,7 @@ public DesiredBalance compute( i++; final int iterations = i; - final long currentTime = timeSupplierMillis.getAsLong(); + final long currentTime = timeProvider.relativeTimeInMillis(); final boolean reportByTime = nextReportTime <= currentTime; final boolean reportByIterationCount = i % iterationCountReportInterval == 0; if (reportByTime || reportByIterationCount) { diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index d8a2d01f56dff..3ed5bc269e6c4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -10,6 +10,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -26,10 +27,12 @@ public record AllocationStats(long unassignedShards, long totalAllocations, long public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {} public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); + public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + public static final String DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_weight.current"; public static final String DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_shard_count.current"; @@ -37,6 +40,15 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w "es.allocator.desired_balance.allocations.node_write_load.current"; public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.desired_balance.allocations.node_disk_usage_bytes.current"; + + public static final String CURRENT_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.allocations.node.shard_count.current"; + public static final String CURRENT_NODE_WRITE_LOAD_METRIC_NAME = "es.allocator.allocations.node.write_load.current"; + public static final String CURRENT_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.allocations.node.disk_usage_bytes.current"; + public static final String CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME = + "es.allocator.allocations.node.undesired_shard_count.current"; + public static final String CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME = + "es.allocator.allocations.node.forecasted_disk_usage_bytes.current"; + public static final AllocationStats EMPTY_ALLOCATION_STATS = new AllocationStats(-1, -1, -1); private volatile boolean nodeIsMaster = false; @@ -56,8 +68,13 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w private volatile long undesiredAllocations; private final AtomicReference> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); + private final AtomicReference> allocationStatsPerNodeRef = new AtomicReference<>(Map.of()); - public void updateMetrics(AllocationStats allocationStats, Map weightStatsPerNode) { + public void updateMetrics( + AllocationStats allocationStats, + Map weightStatsPerNode, + Map 
nodeAllocationStats + ) { assert allocationStats != null : "allocation stats cannot be null"; assert weightStatsPerNode != null : "node balance weight stats cannot be null"; if (allocationStats != EMPTY_ALLOCATION_STATS) { @@ -66,6 +83,7 @@ public void updateMetrics(AllocationStats allocationStats, Map getDesiredBalanceNodeShardCountMetrics() { return values; } + private List getCurrentNodeDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).currentDiskUsage(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeWriteLoadMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).forecastedIngestLoad(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).shards(), getNodeAttributes(node))); + } + return values; + } + + private List getCurrentNodeForecastedDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).forecastedDiskUsage(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeUndesiredShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).undesiredShards(), getNodeAttributes(node))); + } + return values; + } + private Map getNodeAttributes(DiscoveryNode node) { return Map.of("node_id", node.getId(), "node_name", node.getName()); } @@ -216,5 +324,6 @@ public void zeroAllMetrics() { totalAllocations = 0; undesiredAllocations = 0; weightStatsPerNodeRef.set(Map.of()); + allocationStatsPerNodeRef.set(Map.of()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 129144a3d734b..5ad29debc8f20 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import 
org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -34,7 +36,9 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -71,8 +75,14 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); private final DesiredBalanceMetrics desiredBalanceMetrics; - - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, DesiredBalanceMetrics desiredBalanceMetrics) { + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; + + public DesiredBalanceReconciler( + ClusterSettings clusterSettings, + ThreadPool threadPool, + DesiredBalanceMetrics desiredBalanceMetrics, + NodeAllocationStatsProvider nodeAllocationStatsProvider + ) { this.desiredBalanceMetrics = desiredBalanceMetrics; this.undesiredAllocationLogInterval = new FrequencyCappedAction( threadPool.relativeTimeInMillisSupplier(), @@ -83,6 +93,7 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -143,8 +154,20 @@ void run() { logger.debug("Reconciliation is complete"); - desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode()); + updateDesireBalanceMetrics(allocationStats); + } + } + + private void updateDesireBalanceMetrics(AllocationStats allocationStats) { + var stats = nodeAllocationStatsProvider.stats(allocation.getClusterState(), allocation.clusterInfo(), desiredBalance); + Map nodeAllocationStats = new HashMap<>(stats.size()); + for (var entry : stats.entrySet()) { + var node = allocation.nodes().get(entry.getKey()); + if (node != null) { + nodeAllocationStats.put(node, entry.getValue()); + } } + desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), nodeAllocationStats); } private boolean allocateUnassignedInvariant() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 5ccb59e29d7dc..bfe8a20f18043 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; @@ -85,15 +86,17 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceReconcilerAction 
reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this( delegateAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator), + new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), reconciler, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); } @@ -103,7 +106,8 @@ public DesiredBalanceShardsAllocator( ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, DesiredBalanceReconcilerAction reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.delegateAllocator = delegateAllocator; @@ -113,7 +117,8 @@ public DesiredBalanceShardsAllocator( this.desiredBalanceReconciler = new DesiredBalanceReconciler( clusterService.getClusterSettings(), threadPool, - desiredBalanceMetrics + desiredBalanceMetrics, + nodeAllocationStatsProvider ); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java new file mode 100644 index 0000000000000..9c97cb8fe7e85 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common; + +import java.nio.ByteBuffer; +import java.util.Base64; + +/** + * Generates a base64-encoded, k-ordered UUID string optimized for compression and efficient indexing. + *

+ * This method produces a time-based UUID where slowly changing components like the timestamp appear first,
+ * improving prefix-sharing and compression during indexing. It ensures uniqueness across nodes by incorporating
+ * a timestamp, a MAC address, and a sequence ID.
+ *
+ * Timestamp: Represents the current time in milliseconds, ensuring ordering and uniqueness.
+ *
+ * MAC Address: Ensures uniqueness across different coordinators.
+ *
+ * Sequence ID: Differentiates UUIDs generated within the same millisecond, ensuring uniqueness even at high throughput.
+ *
+ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. + */ +public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { + private static final Base64.Encoder BASE_64_NO_PADDING = Base64.getEncoder().withoutPadding(); + + @Override + public String getBase64UUID() { + final int sequenceId = this.sequenceNumber.incrementAndGet() & 0x00FF_FFFF; + + // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. + // Uses AtomicLong to guarantee that timestamp increases even if the system clock moves backward. + // If the sequenceId overflows (reaches 0 within the same millisecond), the timestamp is incremented + // to ensure strict ordering. + long timestamp = this.lastTimestamp.accumulateAndGet( + currentTimeMillis(), + sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max + ); + + final byte[] uuidBytes = new byte[15]; + final ByteBuffer buffer = ByteBuffer.wrap(uuidBytes); + + buffer.put((byte) (timestamp >>> 40)); // changes every 35 years + buffer.put((byte) (timestamp >>> 32)); // changes every ~50 days + buffer.put((byte) (timestamp >>> 24)); // changes every ~4.5h + buffer.put((byte) (timestamp >>> 16)); // changes every ~65 secs + + // MAC address of the coordinator might change if there are many coordinators in the cluster + // and the indexing api does not necessarily target the same coordinator. + byte[] macAddress = macAddress(); + assert macAddress.length == 6; + buffer.put(macAddress, 0, macAddress.length); + + buffer.put((byte) (sequenceId >>> 16)); + + // From hereinafter everything is almost like random and does not compress well + // due to unlikely prefix-sharing + buffer.put((byte) (timestamp >>> 8)); + buffer.put((byte) (sequenceId >>> 8)); + buffer.put((byte) timestamp); + buffer.put((byte) sequenceId); + + assert buffer.position() == uuidBytes.length; + + return BASE_64_NO_PADDING.encodeToString(uuidBytes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 73528ed0d3866..2ed979ae66ffa 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -24,10 +24,10 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { // We only use bottom 3 bytes for the sequence number. 
Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + protected final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); // Used to ensure clock moves forward: - private final AtomicLong lastTimestamp = new AtomicLong(0); + protected final AtomicLong lastTimestamp = new AtomicLong(0); private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index 61ee4bd5d64ab..0f73b8172c10f 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -16,6 +16,8 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + + private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** @@ -33,6 +35,14 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } + public static String base64TimeBasedKOrderedUUID() { + return TIME_BASED_K_ORDERED_GENERATOR.getBase64UUID(); + } + + public static String base64TimeBasedUUID() { + return TIME_UUID_GENERATOR.getBase64UUID(); + } + /** * The length of a UUID string generated by {@link #randomBase64UUID} and {@link #randomBase64UUIDSecureString}. */ diff --git a/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java new file mode 100644 index 0000000000000..8b29d23397383 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common.time; + +/** + * An interface encapsulating the different methods for getting relative and absolute time. The main + * implementation of this is {@link org.elasticsearch.threadpool.ThreadPool}. To make it clear that a + * {@code ThreadPool} is being passed around only to get time, it is preferred to use this interface. + */ +public interface TimeProvider { + + /** + * Returns a value of milliseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + long relativeTimeInMillis(); + + /** + * Returns a value of nanoseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + long relativeTimeInNanos(); + + /** + * Returns a value of milliseconds that may be used for relative time calculations. 
Similar to {@link #relativeTimeInMillis()} except + * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. You should + * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs. + * + * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached + * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really + * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately + * sensible. + */ + long rawRelativeTimeInMillis(); + + /** + * Returns the value of milliseconds since UNIX epoch. + * + * This method should only be used for exact date/time formatting. For calculating + * time deltas that should not suffer from negative deltas, which are possible with + * this method, see {@link #relativeTimeInMillis()}. + */ + long absoluteTimeInMillis(); +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 440613263d441..9264b9e1c3a20 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -132,6 +132,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); + public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index cd92f38e65152..f7b688ba37963 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -74,6 +74,14 @@ default void init(Client client) {} InferenceServiceConfiguration getConfiguration(); + /** + * Whether this service should be hidden from the API. Should be used for services + * that are not ready to be used. + */ + default Boolean hideFromConfigurationApi() { + return Boolean.FALSE; + } + /** * The task types supported by the service * @return Set of supported. 
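For context, a minimal sketch (not part of this PR) of how the new `org.elasticsearch.common.time.TimeProvider` interface introduced above could be implemented outside of `ThreadPool`, for example as a deterministic clock in tests that construct a `DesiredBalanceComputer` directly; the class name `FixedTimeProvider` is hypothetical and only illustrates the four methods declared by the interface:

// Illustrative sketch only (not code from this PR): a deterministic TimeProvider that a test could
// pass in place of a ThreadPool. FixedTimeProvider is a hypothetical name.
import org.elasticsearch.common.time.TimeProvider;

import java.util.concurrent.TimeUnit;

class FixedTimeProvider implements TimeProvider {
    private long millis;

    FixedTimeProvider(long startMillis) {
        this.millis = startMillis;
    }

    // Advance the fake clock between computations, e.g. to exercise progress-logging intervals.
    void advance(long deltaMillis) {
        this.millis += deltaMillis;
    }

    @Override
    public long relativeTimeInMillis() {
        return millis;
    }

    @Override
    public long relativeTimeInNanos() {
        return TimeUnit.MILLISECONDS.toNanos(millis);
    }

    @Override
    public long rawRelativeTimeInMillis() {
        return millis;
    }

    @Override
    public long absoluteTimeInMillis() {
        return millis;
    }
}

Usage would mirror the constructor change above, along the lines of `new DesiredBalanceComputer(clusterSettings, new FixedTimeProvider(0L), delegateAllocator)`; again, a sketch rather than code taken from this change.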
diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 51f52326907eb..a1e8eb25f4780 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.DelayableWriteable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -260,7 +261,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol this.formatSortValues = formatSortValues; } - public DateTime(StreamInput in) throws IOException { + private DateTime(StreamInput in) throws IOException { String formatterPattern = in.readString(); Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES) ? LocaleUtils.parse(in.readString()) @@ -285,6 +286,14 @@ public String getWriteableName() { return NAME; } + public static DateTime readFrom(StreamInput in) throws IOException { + final DateTime dateTime = new DateTime(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(dateTime); + } + return dateTime; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); @@ -528,7 +537,7 @@ public Decimal(String pattern) { this.format = new DecimalFormat(pattern, SYMBOLS); } - public Decimal(StreamInput in) throws IOException { + private Decimal(StreamInput in) throws IOException { this(in.readString()); } @@ -537,6 +546,14 @@ public String getWriteableName() { return NAME; } + public static Decimal readFrom(StreamInput in) throws IOException { + final Decimal decimal = new Decimal(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(decimal); + } + return decimal; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index d5aa809801639..b8f50c6f9a62f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -1013,8 +1013,8 @@ private void registerScoreFunction(ScoreFunctionSpec scoreFunction) { private void registerValueFormats() { registerValueFormat(DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN); - registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new); - registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new); + registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom); + registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom); registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 0155ab34ae637..f55e3740aaa8f 100644 
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProvider; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.SizeValue; @@ -65,7 +66,7 @@ * Manages all the Java thread pools we create. {@link Names} contains a list of the thread pools, but plugins can dynamically add more * thread pools to instantiate. */ -public class ThreadPool implements ReportingService, Scheduler { +public class ThreadPool implements ReportingService, Scheduler, TimeProvider { private static final Logger logger = LogManager.getLogger(ThreadPool.class); @@ -362,12 +363,7 @@ protected ThreadPool() { this.scheduler = null; } - /** - * Returns a value of milliseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ + @Override public long relativeTimeInMillis() { return cachedTimeThread.relativeTimeInMillis(); } @@ -379,37 +375,17 @@ public LongSupplier relativeTimeInMillisSupplier() { return relativeTimeInMillisSupplier; } - /** - * Returns a value of nanoseconds that may be used for relative time calculations. - * - * This method should only be used for calculating time deltas. For an epoch based - * timestamp, see {@link #absoluteTimeInMillis()}. - */ + @Override public long relativeTimeInNanos() { return cachedTimeThread.relativeTimeInNanos(); } - /** - * Returns a value of milliseconds that may be used for relative time calculations. Similar to {@link #relativeTimeInMillis()} except - * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. You should - * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs. - * - * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached - * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really - * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately - * sensible. - */ + @Override public long rawRelativeTimeInMillis() { return TimeValue.nsecToMSec(System.nanoTime()); } - /** - * Returns the value of milliseconds since UNIX epoch. - * - * This method should only be used for exact date/time formatting. For calculating - * time deltas that should not suffer from negative deltas, which are possible with - * this method, see {@link #relativeTimeInMillis()}. 
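In the ThreadPool hunks above, the per-method javadoc is replaced by @Override, with the relative/absolute-time contracts presumably moving onto the TimeProvider interface that ThreadPool now implements. The following rough sketch shows what depending on that abstraction looks like, assuming only the four method names visible in the diff; the interface and implementations below are stand-ins, not the org.elasticsearch.common.time.TimeProvider source.

```java
import java.util.concurrent.TimeUnit;

// Assumed shape of the time abstraction, based on the methods overridden in the diff.
interface TimeProvider {
    long relativeTimeInMillis();
    long relativeTimeInNanos();
    long rawRelativeTimeInMillis();
    long absoluteTimeInMillis();
}

// A component that previously took a LongSupplier (e.g. threadPool::relativeTimeInMillis)
// can take the whole provider instead, keeping relative vs. absolute semantics explicit.
final class ComputationTimer {
    private final TimeProvider timeProvider;
    private long startMillis;

    ComputationTimer(TimeProvider timeProvider) {
        this.timeProvider = timeProvider;
    }

    void start() {
        // Relative time is only meaningful for deltas, never for wall-clock timestamps.
        startMillis = timeProvider.relativeTimeInMillis();
    }

    long elapsedMillis() {
        return timeProvider.relativeTimeInMillis() - startMillis;
    }
}

public class TimeProviderSketch {
    public static void main(String[] args) throws InterruptedException {
        // Production code would pass the ThreadPool itself; here a System-clock-backed stand-in.
        TimeProvider systemClock = new TimeProvider() {
            @Override public long relativeTimeInMillis() { return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); }
            @Override public long relativeTimeInNanos() { return System.nanoTime(); }
            @Override public long rawRelativeTimeInMillis() { return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); }
            @Override public long absoluteTimeInMillis() { return System.currentTimeMillis(); }
        };

        ComputationTimer timer = new ComputationTimer(systemClock);
        timer.start();
        Thread.sleep(10);
        System.out.println("elapsed ~ " + timer.elapsedMillis() + "ms");
    }
}
```

This is the same shape the test changes below rely on: a deterministic, test-controlled provider can be substituted wherever a ThreadPool was previously passed only for its clock.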
- */ + @Override public long absoluteTimeInMillis() { return cachedTimeThread.absoluteTimeInMillis(); } diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 26c518962c19a..ba575cc642a81 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -132,3 +132,4 @@ 8.15.2,8702003 8.15.3,8702003 8.15.4,8702003 +8.16.0,8772001 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index 6cab0b513ee63..c54aea88613f5 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -132,3 +132,4 @@ 8.15.2,8512000 8.15.3,8512000 8.15.4,8512000 +8.16.0,8518000 diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index bb4aa9beeb42e..3dafc8f000f3f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -101,7 +101,7 @@ public void testDeleteDesiredBalance() throws Exception { var clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings); var delegate = new BalancedShardsAllocator(); - var computer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegate) { + var computer = new DesiredBalanceComputer(clusterSettings, threadPool, delegate) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -122,7 +122,8 @@ public DesiredBalance compute( clusterService, computer, (state, action) -> state, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new MockAllocationService( randomAllocationDeciders(settings, clusterSettings), diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 32297e0c09b8f..9d74c2069ec10 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -128,6 +128,12 @@ public void testAutoGenerateId() { assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); } + public void testAutoGenerateTimeBasedId() { + IndexRequest request = new IndexRequest("index"); + request.autoGenerateTimeBasedId(); + assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); + } + public void testIndexResponse() { ShardId shardId = new ShardId(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomIntBetween(0, 1000)); String id = randomAlphaOfLengthBetween(3, 10); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java index 69e6983e16381..0efa576a0cddc 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -80,7 +80,12 @@ public void testShardStats() { var queue = new DeterministicTaskQueue(); try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { - var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + var service = new AllocationStatsService( + clusterService, + () -> clusterInfo, + createShardAllocator(), + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + ); assertThat( service.stats(), allOf( @@ -120,7 +125,7 @@ public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { clusterService, EmptyClusterInfoService.INSTANCE, createShardAllocator(), - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), @@ -163,7 +168,8 @@ public void testUndesiredShardCount() { threadPool, clusterService, (innerState, strategy) -> innerState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public DesiredBalance getDesiredBalance() { @@ -176,7 +182,7 @@ public DesiredBalance getDesiredBalance() { ); } }, - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java index 44f3b7d1d3a11..c5ae771199541 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java @@ -490,7 +490,8 @@ private Map.Entry createNewAllocationSer clusterService, (clusterState, routingAllocationAction) -> strategyRef.get() .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction), - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public void allocate(RoutingAllocation allocation, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 51401acabb0ac..7b77947792bd4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -42,6 +42,8 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProvider; +import org.elasticsearch.common.time.TimeProviderUtils; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexVersion; @@ -1203,42 +1205,40 @@ public void testShouldLogComputationIteration() { private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) { var currentTime = new 
AtomicLong(0L); + TimeProvider timeProvider = TimeProviderUtils.create(() -> currentTime.addAndGet(eachIterationDuration)); + // Some runs of this test try to simulate a long desired balance computation. Setting a high value on the following setting // prevents interrupting a long computation. var clusterSettings = createBuiltInClusterSettings( Settings.builder().put(DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING.getKey(), "2m").build() ); - var desiredBalanceComputer = new DesiredBalanceComputer( - clusterSettings, - () -> currentTime.addAndGet(eachIterationDuration), - new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final var shardRouting = unassignedIterator.next(); - if (shardRouting.primary()) { - unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); - } else { - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); - } - } - - // move shard on each iteration - for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); - } - for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, timeProvider, new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final var shardRouting = unassignedIterator.next(); + if (shardRouting.primary()) { + unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); + } else { + unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); } } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); + // move shard on each iteration + for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); + } + for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); } } - ); + + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); + } + }); assertThatLogger(() -> { var iteration = new AtomicInteger(0); @@ -1346,7 +1346,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), () -> 0L, allocator); + return new DesiredBalanceComputer(createBuiltInClusterSettings(), TimeProviderUtils.create(() -> 0L), allocator); } private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java index 85dc5c9dcd6a9..9e6e080f38216 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -27,7 +27,7 @@ public void testZeroAllMetrics() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomNonNegativeLong(); long undesiredAllocations = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); assertEquals(totalAllocations, metrics.totalAllocations()); assertEquals(unassignedShards, metrics.unassignedShards()); assertEquals(undesiredAllocations, metrics.undesiredAllocations()); @@ -44,7 +44,7 @@ public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomLongBetween(100, 10000000); long undesiredAllocations = randomLongBetween(0, totalAllocations); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); // Collect when not master meterRegistry.getRecorder().collect(); @@ -104,7 +104,7 @@ public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); long unassignedShards = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of(), Map.of()); metrics.setNodeIsMaster(true); meterRegistry.getRecorder().collect(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index b5f44ee9e505f..54f4f0ffb6e15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -1214,7 +1214,8 @@ public void testRebalanceDoesNotCauseHotSpots() { var reconciler = new DesiredBalanceReconciler( clusterSettings, new DeterministicTaskQueue().getThreadPool(), - DesiredBalanceMetrics.NOOP + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var totalOutgoingMoves = new HashMap(); @@ -1296,7 +1297,12 @@ public void testShouldLogOnTooManyUndesiredAllocations() { final var timeInMillisSupplier = new AtomicLong(); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP); + var reconciler = new DesiredBalanceReconciler( + createBuiltInClusterSettings(), + threadPool, + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS + ); final long 
initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); @@ -1348,10 +1354,8 @@ public void testShouldLogOnTooManyUndesiredAllocations() { private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { final var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); - new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP).reconcile( - desiredBalance, - routingAllocation - ); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP, EMPTY_NODE_ALLOCATION_STATS) + .reconcile(desiredBalance, routingAllocation); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 2cb3204787ce1..b18e2c0cd2647 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.TimeProviderUtils; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.core.TimeValue; @@ -168,7 +169,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator(allocateUnassigned)); @@ -295,7 +297,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new AllocationService( new AllocationDeciders(List.of()), @@ -396,7 +399,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, time::get, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, TimeProviderUtils.create(time::get), shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -413,7 +416,8 @@ boolean hasEnoughIterations(int currentIteration) { } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -522,7 +526,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, 
shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -540,7 +544,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -625,7 +630,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -643,7 +648,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); @@ -712,7 +718,7 @@ public void testResetDesiredBalance() { var delegateAllocator = createShardsAllocator(); var clusterSettings = createBuiltInClusterSettings(); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator) { + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -734,7 +740,8 @@ public DesiredBalance compute( clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -780,18 +787,15 @@ public void testResetDesiredBalanceOnNoLongerMaster() { var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - threadPool::relativeTimeInMillis, - delegateAllocator - ); + var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -833,18 +837,15 @@ public void testResetDesiredBalanceOnNodeShutdown() { final var resetCalled = new AtomicBoolean(); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer( - createBuiltInClusterSettings(), - threadPool::relativeTimeInMillis, - delegateAllocator - ); + var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); var desiredBalanceAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + 
EMPTY_NODE_ALLOCATION_STATS ) { @Override public void resetDesiredBalance() { diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 2e7dbb00aa2de..9fbeaf1c6c081 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -35,6 +35,7 @@ public class UUIDTests extends ESTestCase { static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator(); static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator(); + static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator(); public void testRandomUUID() { verifyUUIDSet(100000, randomUUIDGen); @@ -44,14 +45,49 @@ public void testTimeUUID() { verifyUUIDSet(100000, timeUUIDGen); } - public void testThreadedTimeUUID() { - testUUIDThreaded(timeUUIDGen); + public void testKOrderedUUID() { + verifyUUIDSet(100000, kOrderedUUIDGen); } public void testThreadedRandomUUID() { testUUIDThreaded(randomUUIDGen); } + public void testThreadedTimeUUID() { + testUUIDThreaded(timeUUIDGen); + } + + public void testThreadedKOrderedUUID() { + testUUIDThreaded(kOrderedUUIDGen); + } + + public void testCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + assertThat(testCompression(timeUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(timeUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(15d)); + assertThat(testCompression(timeUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(21d)); + + assertThat(testCompression(kOrderedUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(13d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(19d)); + } + + public void testComparativeCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + int numDocs = 100000; + int docsPerSecond = 1000; + int nodes = 3; + + double randomCompression = testCompression(randomUUIDGen, numDocs, docsPerSecond, nodes, logger); + double baseCompression = testCompression(timeUUIDGen, numDocs, docsPerSecond, nodes, logger); + double kOrderedCompression = testCompression(kOrderedUUIDGen, numDocs, docsPerSecond, nodes, logger); + + assertThat(kOrderedCompression, Matchers.lessThanOrEqualTo(baseCompression)); + assertThat(kOrderedCompression, Matchers.lessThanOrEqualTo(randomCompression)); + } + Set verifyUUIDSet(int count, UUIDGenerator uuidSource) { HashSet uuidSet = new HashSet<>(); for (int i = 0; i < count; ++i) { @@ -109,49 +145,62 @@ public void testUUIDThreaded(UUIDGenerator uuidSource) { assertEquals(count * uuids, globalSet.size()); } - public void testCompression() throws Exception { - Logger logger = LogManager.getLogger(UUIDTests.class); - // Low number so that the test runs quickly, but the results are more interesting with larger numbers - // of indexed documents - assertThat(testCompression(100000, 10000, 3, logger), Matchers.lessThan(14d)); // ~12 in practice - assertThat(testCompression(100000, 1000, 3, logger), Matchers.lessThan(15d)); // ~13 in practice - assertThat(testCompression(100000, 100, 3, logger), Matchers.lessThan(21d)); // ~20 in practice - } - - private static double testCompression(int numDocs, int numDocsPerSecond, int numNodes, Logger logger) throws Exception { - final double intervalBetweenDocs = 1000. 
/ numDocsPerSecond; // milliseconds + private static double testCompression(final UUIDGenerator generator, int numDocs, int numDocsPerSecond, int numNodes, Logger logger) + throws Exception { + final double intervalBetweenDocs = 1000. / numDocsPerSecond; final byte[][] macAddresses = new byte[numNodes][]; Random r = random(); for (int i = 0; i < macAddresses.length; ++i) { macAddresses[i] = new byte[6]; random().nextBytes(macAddresses[i]); } - UUIDGenerator generator = new TimeBasedUUIDGenerator() { - double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); - @Override - protected long currentTimeMillis() { - currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); - return (long) currentTimeMillis; + UUIDGenerator uuidSource = generator; + if (generator instanceof TimeBasedUUIDGenerator) { + if (generator instanceof TimeBasedKOrderedUUIDGenerator) { + uuidSource = new TimeBasedKOrderedUUIDGenerator() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + protected long currentTimeMillis() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + + @Override + protected byte[] macAddress() { + return RandomPicks.randomFrom(r, macAddresses); + } + }; + } else { + uuidSource = new TimeBasedUUIDGenerator() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + protected long currentTimeMillis() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + + @Override + protected byte[] macAddress() { + return RandomPicks.randomFrom(r, macAddresses); + } + }; } + } - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; - // Avoid randomization which will slow down things without improving - // the quality of this test Directory dir = newFSDirectory(createTempDir()); IndexWriterConfig config = new IndexWriterConfig().setCodec(Codec.forName(Lucene.LATEST_CODEC)) - .setMergeScheduler(new SerialMergeScheduler()); // for reproducibility + .setMergeScheduler(new SerialMergeScheduler()); + IndexWriter w = new IndexWriter(dir, config); Document doc = new Document(); StringField id = new StringField("_id", "", Store.NO); doc.add(id); long start = System.nanoTime(); for (int i = 0; i < numDocs; ++i) { - id.setStringValue(generator.getBase64UUID()); + id.setStringValue(uuidSource.getBase64UUID()); w.addDocument(doc); } w.forceMerge(1); @@ -164,30 +213,25 @@ protected byte[] macAddress() { dir.close(); double bytesPerDoc = (double) size / numDocs; logger.info( - numDocs - + " docs indexed at " - + numDocsPerSecond - + " docs/s required " - + ByteSizeValue.ofBytes(size) - + " bytes of disk space, or " - + bytesPerDoc - + " bytes per document. Took: " - + new TimeValue(time) - + "." + "{} - {} docs indexed at {} docs/s required {} bytes of disk space, or {} bytes per document. 
Took: {}.", + uuidSource.getClass().getSimpleName(), + numDocs, + numDocsPerSecond, + ByteSizeValue.ofBytes(size), + bytesPerDoc, + new TimeValue(time) ); return bytesPerDoc; } public void testStringLength() { assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(RandomBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID().length()); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID(random()).length()); - try (var secureString = UUIDs.randomBase64UUIDSecureString()) { - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, secureString.toString().length()); - } - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, UUIDs.base64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedKOrderedUUIDGenerator.SIZE_IN_BYTES)); + + assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, randomUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, timeUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, kOrderedUUIDGen.getBase64UUID().length()); } private static int getUnpaddedBase64StringLength(int sizeInBytes) { diff --git a/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java new file mode 100644 index 0000000000000..a3c5c105eb34a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.core.TimeValue; + +import java.util.function.LongSupplier; + +public class TimeProviderUtils { + + /** + * Creates a TimeProvider implementation for tests that uses the same source for + * all methods (regardless of relative or absolute time). 
+ */ + public static TimeProvider create(LongSupplier timeSourceInMillis) { + return new TimeProvider() { + @Override + public long relativeTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + + @Override + public long relativeTimeInNanos() { + return timeSourceInMillis.getAsLong() * TimeValue.NSEC_PER_MSEC; + } + + @Override + public long rawRelativeTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + + @Override + public long absoluteTimeInMillis() { + return timeSourceInMillis.getAsLong(); + } + }; + } +} diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index e81066a731d2e..7c9a68cbc91f1 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -43,8 +43,8 @@ public class DocValueFormatTests extends ESTestCase { public void testSerialization() throws Exception { List entries = new ArrayList<>(); entries.add(new Entry(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index a1718e956800c..a041efc9ad3f1 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -24,6 +24,8 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedShard; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -37,6 +39,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.gateway.GatewayAllocator; @@ -165,7 +168,8 @@ private static DesiredBalanceShardsAllocator createDesiredBalanceShardsAllocator queue.getThreadPool(), clusterService, null, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { 
private RoutingAllocation lastAllocation; @@ -432,4 +436,17 @@ public void allocateUnassigned( } } } + + protected static final NodeAllocationStatsProvider EMPTY_NODE_ALLOCATION_STATS = new NodeAllocationStatsProvider( + WriteLoadForecaster.DEFAULT + ) { + @Override + public Map stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + return Map.of(); + } + }; } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index f9a1318cd9740..081c83b1e7067 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -135,9 +135,9 @@ public void testApisWithoutTaskType() throws IOException { public void testGetServicesWithoutTaskType() throws IOException { List services = getAllServices(); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - assertThat(services.size(), equalTo(19)); - } else { assertThat(services.size(), equalTo(18)); + } else { + assertThat(services.size(), equalTo(17)); } String[] providers = new String[services.size()]; @@ -160,7 +160,6 @@ public void testGetServicesWithoutTaskType() throws IOException { "googleaistudio", "googlevertexai", "hugging_face", - "hugging_face_elser", "mistral", "openai", "streaming_completion_test_service", @@ -259,9 +258,9 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { List services = getServices(TaskType.SPARSE_EMBEDDING); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - assertThat(services.size(), equalTo(6)); - } else { assertThat(services.size(), equalTo(5)); + } else { + assertThat(services.size(), equalTo(4)); } String[] providers = new String[services.size()]; @@ -272,9 +271,7 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { Arrays.sort(providers); - var providerList = new ArrayList<>( - Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service") - ); + var providerList = new ArrayList<>(Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "test_service")); if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { providerList.add(1, "elastic"); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java index a6109bfe659d7..002b2b0fe93b0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java @@ -68,7 +68,10 @@ private void getServiceConfigurationsForTaskType( var filteredServices = serviceRegistry.getServices() .entrySet() .stream() - .filter(service -> service.getValue().supportedTaskTypes().contains(requestedTaskType)) + .filter( + service -> service.getValue().hideFromConfigurationApi() == false + && 
service.getValue().supportedTaskTypes().contains(requestedTaskType) + ) .collect(Collectors.toSet()); getServiceConfigurationsForServices(filteredServices, listener.delegateFailureAndWrap((delegate, configurations) -> { @@ -77,12 +80,14 @@ private void getServiceConfigurationsForTaskType( } private void getAllServiceConfigurations(ActionListener listener) { - getServiceConfigurationsForServices( - serviceRegistry.getServices().entrySet(), - listener.delegateFailureAndWrap((delegate, configurations) -> { - delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); - }) - ); + var availableServices = serviceRegistry.getServices() + .entrySet() + .stream() + .filter(service -> service.getValue().hideFromConfigurationApi() == false) + .collect(Collectors.toSet()); + getServiceConfigurationsForServices(availableServices, listener.delegateFailureAndWrap((delegate, configurations) -> { + delegate.onResponse(new GetInferenceServicesAction.Response(configurations)); + })); } private void getServiceConfigurationsForServices( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java index 27b3ae95f1aa4..99f535f81485c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.sender.GoogleVertexAiEmbeddingsRequestManager; @@ -33,9 +34,10 @@ public GoogleVertexAiActionCreator(Sender sender, ServiceComponents serviceCompo } @Override - public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings) { + public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType) { + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, taskSettings, inputType); var requestManager = new GoogleVertexAiEmbeddingsRequestManager( - model, + overriddenModel, serviceComponents.truncator(), serviceComponents.threadPool() ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java index def8f09ce06be..2b5cd5854c8ab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; 
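The GoogleVertexAiActionCreator change above stops building the request manager from the stored model directly and instead derives an overridden model via GoogleVertexAiEmbeddingsModel.of(model, taskSettings, inputType), so per-request task settings and the request-level input type take effect for that call only. A compact sketch of that merge-then-build pattern follows, using hypothetical record types rather than the plugin's classes.

```java
import java.util.Map;

// Hypothetical stand-ins sketching the "override per request" pattern; not the real plugin classes.
enum InputType { INGEST, SEARCH, CLASSIFICATION, CLUSTERING, UNSPECIFIED }

record TaskSettings(Boolean autoTruncate, InputType inputType) {

    // Persisted settings merged with whatever the individual request supplied:
    // request values win, missing values fall back to the stored configuration.
    TaskSettings overriddenBy(Map<String, Object> requestSettings, InputType requestInputType) {
        Boolean truncate = requestSettings.containsKey("auto_truncate")
            ? (Boolean) requestSettings.get("auto_truncate")
            : autoTruncate;
        InputType effectiveInput = requestInputType != InputType.UNSPECIFIED ? requestInputType : inputType;
        return new TaskSettings(truncate, effectiveInput);
    }
}

record EmbeddingsModel(String modelId, TaskSettings taskSettings) {

    // Mirrors the shape of a static of(model, taskSettings, inputType) factory:
    // the action is created against the overridden model, not the stored one.
    static EmbeddingsModel of(EmbeddingsModel model, Map<String, Object> requestSettings, InputType inputType) {
        return new EmbeddingsModel(model.modelId(), model.taskSettings().overriddenBy(requestSettings, inputType));
    }
}

public class RequestOverrideSketch {
    public static void main(String[] args) {
        EmbeddingsModel stored = new EmbeddingsModel("text-embedding", new TaskSettings(true, InputType.INGEST));

        // A search request overrides the stored INGEST input type for this call only.
        EmbeddingsModel effective = EmbeddingsModel.of(stored, Map.of(), InputType.SEARCH);

        System.out.println(effective.taskSettings()); // TaskSettings[autoTruncate=true, inputType=SEARCH]
    }
}
```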
import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; @@ -15,7 +16,7 @@ public interface GoogleVertexAiActionVisitor { - ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings); + ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType); ExecutableAction create(GoogleVertexAiRerankModel model, Map taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java index c0e36baf2e98f..75320bc762c8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java @@ -40,7 +40,7 @@ public HttpRequest createHttpRequest() { HttpPost httpPost = new HttpPost(model.uri()); ByteArrayEntity byteEntity = new ByteArrayEntity( - Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings().autoTruncate())) + Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings())) .getBytes(StandardCharsets.UTF_8) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java index 2fae999599ba2..fc33df0d63acd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java @@ -7,23 +7,35 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; import java.util.Objects; -public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, @Nullable Boolean autoTruncation) implements ToXContentObject { +import static org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsTaskSettings.invalidInputTypeMessage; + +public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, GoogleVertexAiEmbeddingsTaskSettings taskSettings) + implements + ToXContentObject { private static final String INSTANCES_FIELD = "instances"; private static final String CONTENT_FIELD = "content"; private static final String PARAMETERS_FIELD = "parameters"; private static final String AUTO_TRUNCATE_FIELD = "autoTruncate"; + private static final String TASK_TYPE_FIELD = "task_type"; + + private static final String CLASSIFICATION_TASK_TYPE = "CLASSIFICATION"; + private static final String CLUSTERING_TASK_TYPE = "CLUSTERING"; + private static final String RETRIEVAL_DOCUMENT_TASK_TYPE = 
"RETRIEVAL_DOCUMENT"; + private static final String RETRIEVAL_QUERY_TASK_TYPE = "RETRIEVAL_QUERY"; public GoogleVertexAiEmbeddingsRequestEntity { Objects.requireNonNull(inputs); + Objects.requireNonNull(taskSettings); } @Override @@ -35,16 +47,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); { builder.field(CONTENT_FIELD, input); + + if (taskSettings.getInputType() != null) { + builder.field(TASK_TYPE_FIELD, convertToString(taskSettings.getInputType())); + } } builder.endObject(); } builder.endArray(); - if (autoTruncation != null) { + if (taskSettings.autoTruncate() != null) { builder.startObject(PARAMETERS_FIELD); { - builder.field(AUTO_TRUNCATE_FIELD, autoTruncation); + builder.field(AUTO_TRUNCATE_FIELD, taskSettings.autoTruncate()); } builder.endObject(); } @@ -52,4 +68,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + + static String convertToString(InputType inputType) { + return switch (inputType) { + case INGEST -> RETRIEVAL_DOCUMENT_TASK_TYPE; + case SEARCH -> RETRIEVAL_QUERY_TASK_TYPE; + case CLASSIFICATION -> CLASSIFICATION_TASK_TYPE; + case CLUSTERING -> CLUSTERING_TASK_TYPE; + default -> { + assert false : invalidInputTypeMessage(inputType); + yield null; + } + }; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java index 17e6ec2152e7e..caa244f8af4f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java @@ -7,13 +7,16 @@ package org.elasticsearch.xpack.inference.services.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; +import java.net.URI; import java.util.Map; import java.util.Objects; @@ -21,6 +24,8 @@ public abstract class GoogleVertexAiModel extends Model { private final GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings; + protected URI uri; + public GoogleVertexAiModel( ModelConfigurations configurations, ModelSecrets secrets, @@ -34,13 +39,24 @@ public GoogleVertexAiModel( public GoogleVertexAiModel(GoogleVertexAiModel model, ServiceSettings serviceSettings) { super(model, serviceSettings); + uri = model.uri(); + rateLimitServiceSettings = model.rateLimitServiceSettings(); + } + + public GoogleVertexAiModel(GoogleVertexAiModel model, TaskSettings taskSettings) { + super(model, taskSettings); + + uri = model.uri(); rateLimitServiceSettings = model.rateLimitServiceSettings(); } - public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings); + public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings, InputType inputType); public GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings() { return rateLimitServiceSettings; } + 
public URI uri() { + return uri; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index 0b4da10e7130f..a05b1a937d376 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -210,7 +210,7 @@ protected void doInfer( var actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(inputs, timeout, listener); } @@ -235,7 +235,7 @@ protected void doChunkedInfer( ).batchRequestsWithListeners(listener); for (var request : batchedRequests) { - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java index 1df8ee937497a..a5acbb80b76ec 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java @@ -11,12 +11,14 @@ import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; @@ -29,13 +31,25 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; public class GoogleVertexAiEmbeddingsModel extends GoogleVertexAiModel { - private URI uri; + public static GoogleVertexAiEmbeddingsModel of( + GoogleVertexAiEmbeddingsModel model, + Map taskSettings, + 
InputType inputType + ) { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(taskSettings); + return new GoogleVertexAiEmbeddingsModel( + model, + GoogleVertexAiEmbeddingsTaskSettings.of(model.getTaskSettings(), requestTaskSettings, inputType) + ); + } public GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ -62,6 +76,10 @@ public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, Google super(model, serviceSettings); } + public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, GoogleVertexAiEmbeddingsTaskSettings taskSettings) { + super(model, taskSettings); + } + // Should only be used directly for testing GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ -126,13 +144,9 @@ public GoogleVertexAiEmbeddingsRateLimitServiceSettings rateLimitServiceSettings return (GoogleVertexAiEmbeddingsRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { - return visitor.create(this, taskSettings); + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { + return visitor.create(this, taskSettings, inputType); } public static URI buildUri(String location, String projectId, String modelId) throws URISyntaxException { @@ -161,11 +175,32 @@ public static Map get() { new LazyInitializable<>(() -> { var configurationMap = new HashMap(); + configurationMap.put( + INPUT_TYPE, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of( + InputType.CLASSIFICATION.toString(), + InputType.CLUSTERING.toString(), + InputType.INGEST.toString(), + InputType.SEARCH.toString() + ).map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()).toList() + ) + .setValue("") + .build() + ); + configurationMap.put( AUTO_TRUNCATE, new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) .setLabel("Auto Truncate") - .setOrder(1) + .setOrder(2) .setRequired(false) .setSensitive(false) .setTooltip("Specifies if the API truncates inputs longer than the maximum token length automatically.") diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java index 14a67a64377e2..e39c423582151 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java @@ -9,29 +9,46 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; -public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate) { +public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { - public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings( + null, + null + ); public static GoogleVertexAiEmbeddingsRequestTaskSettings fromMap(Map map) { - if (map.isEmpty()) { - return GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; } ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java index dcdbbda33575f..9b759a4661bce 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java @@ -9,19 +9,24 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { @@ -29,48 +34,108 @@ public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { public static final String AUTO_TRUNCATE = "auto_truncate"; - public static final GoogleVertexAiEmbeddingsTaskSettings 
EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings( - Boolean.valueOf(null) + public static final String INPUT_TYPE = "input_type"; + + static final EnumSet VALID_REQUEST_VALUES = EnumSet.of( + InputType.INGEST, + InputType.SEARCH, + InputType.CLASSIFICATION, + InputType.CLUSTERING ); + public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings(null, null); + public static GoogleVertexAiEmbeddingsTaskSettings fromMap(Map map) { + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; + } + ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); } public static GoogleVertexAiEmbeddingsTaskSettings of( GoogleVertexAiEmbeddingsTaskSettings originalSettings, - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings + GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings, + InputType requestInputType ) { + var inputTypeToUse = getValidInputType(originalSettings, requestSettings, requestInputType); var autoTruncate = requestSettings.autoTruncate() == null ? originalSettings.autoTruncate : requestSettings.autoTruncate(); - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputTypeToUse); + } + + private static InputType getValidInputType( + GoogleVertexAiEmbeddingsTaskSettings originalSettings, + GoogleVertexAiEmbeddingsRequestTaskSettings requestTaskSettings, + InputType requestInputType + ) { + InputType inputTypeToUse = originalSettings.inputType; + + if (VALID_REQUEST_VALUES.contains(requestInputType)) { + inputTypeToUse = requestInputType; + } else if (requestTaskSettings.inputType() != null) { + inputTypeToUse = requestTaskSettings.inputType(); + } + + return inputTypeToUse; } + private final InputType inputType; private final Boolean autoTruncate; - public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate) { + public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { + validateInputType(inputType); + this.inputType = inputType; this.autoTruncate = autoTruncate; } public GoogleVertexAiEmbeddingsTaskSettings(StreamInput in) throws IOException { this.autoTruncate = in.readOptionalBoolean(); + + var inputType = (in.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) + ? 
in.readOptionalEnum(InputType.class) + : null; + + validateInputType(inputType); + this.inputType = inputType; + } + + private static void validateInputType(InputType inputType) { + if (inputType == null) { + return; + } + + assert VALID_REQUEST_VALUES.contains(inputType) : invalidInputTypeMessage(inputType); } @Override public boolean isEmpty() { - return autoTruncate == null; + return inputType == null && autoTruncate == null; } public Boolean autoTruncate() { return autoTruncate; } + public InputType getInputType() { + return inputType; + } + @Override public String getWriteableName() { return NAME; @@ -84,11 +149,19 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(this.autoTruncate); + + if (out.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + out.writeOptionalEnum(this.inputType); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + if (inputType != null) { + builder.field(INPUT_TYPE, inputType); + } + if (autoTruncate != null) { builder.field(AUTO_TRUNCATE, autoTruncate); } @@ -101,19 +174,23 @@ public boolean equals(Object object) { if (this == object) return true; if (object == null || getClass() != object.getClass()) return false; GoogleVertexAiEmbeddingsTaskSettings that = (GoogleVertexAiEmbeddingsTaskSettings) object; - return Objects.equals(autoTruncate, that.autoTruncate); + return Objects.equals(inputType, that.inputType) && Objects.equals(autoTruncate, that.autoTruncate); } @Override public int hashCode() { - return Objects.hash(autoTruncate); + return Objects.hash(autoTruncate, inputType); + } + + public static String invalidInputTypeMessage(InputType inputType) { + return Strings.format("received invalid input type value [%s]", inputType.toString()); } @Override public TaskSettings updatedTaskSettings(Map newSettings) { - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + GoogleVertexAiEmbeddingsRequestTaskSettings updatedSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(newSettings) ); - return of(this, requestSettings); + return of(this, updatedSettings, updatedSettings.inputType() != null ? 
updatedSettings.inputType() : this.inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java index 3f9c4f7a66560..e73d8d2e2613a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java @@ -10,6 +10,7 @@ import org.apache.http.client.utils.URIBuilder; import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; @@ -34,8 +35,6 @@ public class GoogleVertexAiRerankModel extends GoogleVertexAiModel { - private URI uri; - public GoogleVertexAiRerankModel( String inferenceEntityId, TaskType taskType, @@ -122,12 +121,8 @@ public GoogleDiscoveryEngineRateLimitServiceSettings rateLimitServiceSettings() return (GoogleDiscoveryEngineRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { return visitor.create(this, taskSettings); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index e0afbf924f654..a2e22e24172cf 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -125,6 +125,11 @@ public InferenceServiceConfiguration getConfiguration() { return Configuration.get(); } + @Override + public Boolean hideFromConfigurationApi() { + return Boolean.TRUE; + } + @Override public EnumSet supportedTaskTypes() { return supportedTaskTypes; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java index f4912e0862e60..18ae7425aaaf2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import 
org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; @@ -20,8 +22,11 @@ public class GoogleVertexAiEmbeddingsRequestEntityTests extends ESTestCase { - public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), true); + public void testToXContent_SingleEmbeddingRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.SEARCH) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -31,7 +36,8 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_QUERY" } ], "parameters": { @@ -42,7 +48,10 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( } public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), null); + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.INGEST) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -52,15 +61,16 @@ public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNo { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_DOCUMENT" } ] } """)); } - public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), true); + public void testToXContent_SingleEmbeddingRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), new GoogleVertexAiEmbeddingsTaskSettings(false, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -71,9 +81,35 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin "instances": [ { "content": "abc" + } + ], + "parameters": { + "autoTruncate": false + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.CLUSTERING) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLUSTERING" }, { - "content": "def" + "content": "def", + "task_type": "CLUSTERING" } ], "parameters": { @@ -83,8 +119,8 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin """)); } - public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new 
GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null); + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), new GoogleVertexAiEmbeddingsTaskSettings(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -99,8 +135,48 @@ public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationI { "content": "def" } + ], + "parameters": { + "autoTruncate": true + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLASSIFICATION" + }, + { + "content": "def", + "task_type": "CLASSIFICATION" + } ] } """)); } + + public void testToXContent_ThrowsIfInputIsNull() { + expectThrows( + NullPointerException.class, + () -> new GoogleVertexAiEmbeddingsRequestEntity(null, new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION)) + ); + } + + public void testToXContent_ThrowsIfTaskSettingsIsNull() { + expectThrows(NullPointerException.class, () -> new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null)); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java index b28fd8d3a0cf9..a26d3496bed6b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java @@ -10,6 +10,7 @@ import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.common.Truncator; @@ -31,11 +32,11 @@ public class GoogleVertexAiEmbeddingsRequestTests extends ESTestCase { private static final String AUTH_HEADER_VALUE = "foo"; - public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet() throws IOException { + public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet_And_WithoutInputTypeSet() throws IOException { var model = "model"; var input = "input"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -54,7 +55,7 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { var input = "input"; var autoTruncate = true; - var request = createRequest(model, input, 
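The expected payloads in the request-entity tests above pin down how each accepted InputType is rendered as a Vertex AI task_type string. The switch below is only a summary of those expectations, not the production converter; the actual mapping is performed inside GoogleVertexAiEmbeddingsRequestEntity, which is outside this excerpt.

// Summary of the task_type values the tests above expect for each InputType.
static String vertexAiTaskType(InputType inputType) {
    return switch (inputType) {
        case SEARCH -> "RETRIEVAL_QUERY";
        case INGEST -> "RETRIEVAL_DOCUMENT";
        case CLASSIFICATION -> "CLASSIFICATION";
        case CLUSTERING -> "CLUSTERING";
        // Other values (e.g. UNSPECIFIED) should never reach the request entity,
        // because the task settings only accept the four values in VALID_REQUEST_VALUES.
        default -> throw new IllegalArgumentException("unsupported input type [" + inputType + "]");
    };
}

When the task settings carry no input type at all, the tests above show that the task_type field is simply omitted from each instance.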
autoTruncate); + var request = createRequest(model, input, autoTruncate, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -68,11 +69,29 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input")), "parameters", Map.of("autoTruncate", true)))); } + public void testCreateRequest_WithInputTypeSet() throws IOException { + var model = "model"; + var input = "input"; + + var request = createRequest(model, input, null, InputType.SEARCH); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(1)); + assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input", "task_type", "RETRIEVAL_QUERY"))))); + } + public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { var model = "model"; var input = "abcd"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var truncatedRequest = request.truncate(); var httpRequest = truncatedRequest.createHttpRequest(); @@ -87,8 +106,13 @@ public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "ab"))))); } - private static GoogleVertexAiEmbeddingsRequest createRequest(String modelId, String input, @Nullable Boolean autoTruncate) { - var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate); + private static GoogleVertexAiEmbeddingsRequest createRequest( + String modelId, + String input, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { + var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate, inputType); return new GoogleVertexAiEmbeddingsWithoutAuthRequest( TruncatorTests.createTruncator(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java index 6f28301078853..906a825e49561 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; @@ -109,7 +111,7 @@ public void testParseRequestConfig_CreatesGoogleVertexAiEmbeddingsModel() throws 
projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -154,7 +156,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ), @@ -200,7 +202,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(false, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -281,7 +283,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, InputType.SEARCH), getSecretSettingsMap("{}") ); config.put("extra_key", "value"); @@ -308,7 +310,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa ); serviceSettings.put("extra_key", "value"); - var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true), getSecretSettingsMap("{}")); + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true, InputType.CLUSTERING), getSecretSettingsMap("{}")); var failureListener = getModelListenerForException( ElasticsearchStatusException.class, @@ -362,7 +364,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, null), secretSettings ); @@ -399,7 +401,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ); @@ -417,7 +419,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -447,7 +449,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ); @@ -466,7 +468,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), 
is(serviceAccountJson)); } @@ -497,7 +499,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), getSecretSettingsMap(serviceAccountJson) ); @@ -515,7 +517,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } @@ -573,7 +575,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ); persistedConfig.config().put("extra_key", "value"); @@ -592,7 +594,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.INGEST))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -625,7 +627,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), secretSettingsMap ); @@ -643,7 +645,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -676,7 +678,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists var persistedConfig = getPersistedConfigMap( serviceSettingsMap, - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.CLUSTERING), getSecretSettingsMap(serviceAccountJson) ); @@ -694,7 +696,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), 
is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.CLUSTERING))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -711,7 +713,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists """; try (var service = createGoogleVertexAiService()) { - var taskSettings = getTaskSettingsMap(autoTruncate); + var taskSettings = getTaskSettingsMap(autoTruncate, InputType.SEARCH); taskSettings.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap( @@ -745,7 +747,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -770,7 +772,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap() ); @@ -783,7 +785,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -808,7 +810,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting true ) ), - getTaskSettingsMap(autoTruncate) + getTaskSettingsMap(autoTruncate, null) ); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -820,7 +822,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -838,12 +840,44 @@ public void testGetConfiguration() throws Exception { { "task_type": "text_embedding", "configuration": { + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ 
+ { + "label": "classification", + "value": "classification" + }, + { + "label": "clustering", + "value": "clustering" + }, + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + }, "auto_truncate": { "default_value": null, "depends_on": [], "display": "toggle", "label": "Auto Truncate", - "order": 1, + "order": 2, "required": false, "sensitive": false, "tooltip": "Specifies if the API truncates inputs longer than the maximum token length automatically.", @@ -1005,11 +1039,15 @@ private static ActionListener getModelListenerForException(Class excep }); } - private static Map getTaskSettingsMap(Boolean autoTruncate) { + private static Map getTaskSettingsMap(Boolean autoTruncate, @Nullable InputType inputType) { var taskSettings = new HashMap(); taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate); + if (inputType != null) { + taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, inputType.toString()); + } + return taskSettings; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java index 68d03d350d06e..7836c5c15cfb1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java @@ -10,14 +10,18 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; +import org.hamcrest.MatcherAssert; import java.net.URI; import java.net.URISyntaxException; +import java.util.Map; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettingsTests.getTaskSettingsMap; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsModelTests extends ESTestCase { @@ -45,6 +49,75 @@ public void testBuildUri() throws URISyntaxException { ); } + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreEmpty_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreNull_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, null, InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void 
testOverrideWith_SetsInputTypeToOverride_WhenFieldIsNullInModelTaskSettings_AndNullInRequestTaskSettings() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingStoredTaskSettings() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingRequestTaskSettings() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.CLUSTERING), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_OverridesInputType_WithRequestTaskSettingsSearch_WhenRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.SEARCH), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_FromRequest_IfInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, null); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_WhenRequestTaskSettingsIsNull_AndRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.INGEST); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotOverrideModelUri() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), null); + + MatcherAssert.assertThat(overriddenModel.uri(), is(model.uri())); + } + public static GoogleVertexAiEmbeddingsModel createModel( String location, String projectId, @@ -58,12 +131,37 @@ public static GoogleVertexAiEmbeddingsModel createModel( "service", uri, new GoogleVertexAiEmbeddingsServiceSettings(location, projectId, modelId, false, null, null, null, null), - new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE), + new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE, null), new GoogleVertexAiSecretSettings(new SecureString(serviceAccountJson.toCharArray())) ); } - public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate) { + public static 
GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate, @Nullable InputType inputType) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new GoogleVertexAiEmbeddingsServiceSettings( + "location", + "projectId", + modelId, + false, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), + null, + new GoogleVertexAiSecretSettings(new SecureString("testString".toCharArray())) + ); + } + + public static GoogleVertexAiEmbeddingsModel createRandomizedModel( + String modelId, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { return new GoogleVertexAiEmbeddingsModel( "id", TaskType.TEXT_EMBEDDING, @@ -78,7 +176,7 @@ public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullabl SimilarityMeasure.DOT_PRODUCT, null ), - new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), null, new GoogleVertexAiSecretSettings(new SecureString(randomAlphaOfLength(8).toCharArray())) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java index 1e9a2f435cb08..a49e0f2e3f57d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -21,9 +23,14 @@ public void testFromMap_ReturnsEmptySettings_IfMapEmpty() { assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); } + public void testFromMap_ReturnsEmptySettings_IfMapNull() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(null); + assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); + } + public void testFromMap_DoesNotThrowValidationException_IfAutoTruncateIsMissing() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("unrelated", true))); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null))); } public void testFromMap_ExtractsAutoTruncate() { @@ -31,6 +38,40 @@ public void testFromMap_ExtractsAutoTruncate() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate)) ); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, null))); + } + + public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { + expectThrows( + 
ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, "invalid")) + ) + ); + } + + public void testFromMap_ExtractsInputType() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.INGEST.toString())) + ); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, InputType.INGEST))); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsInvalidValue() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsUnspecified() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java index 5b87bbc3c42c8..0a390b114702c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java @@ -8,21 +8,30 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; +import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.inference.InputTypeTests.randomWithoutUnspecified; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { @@ -39,6 +48,9 @@ public void testUpdatedTaskSettings() { if (newSettings.autoTruncate() != null) { newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, newSettings.autoTruncate()); } + if 
(newSettings.getInputType() != null) { + newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, newSettings.getInputType().toString()); + } GoogleVertexAiEmbeddingsTaskSettings updatedSettings = (GoogleVertexAiEmbeddingsTaskSettings) initialSettings.updatedTaskSettings( Collections.unmodifiableMap(newSettingsMap) ); @@ -47,56 +59,144 @@ public void testUpdatedTaskSettings() { } else { assertEquals(newSettings.autoTruncate(), updatedSettings.autoTruncate()); } + if (newSettings.getInputType() == null) { + assertEquals(initialSettings.getInputType(), updatedSettings.getInputType()); + } else { + assertEquals(newSettings.getInputType(), updatedSettings.getInputType()); + } + } + + public void testFromMap_CreatesEmptySettings_WhenAllFieldsAreNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).getInputType()); + } + + public void testFromMap_CreatesEmptySettings_WhenMapIsNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(null), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).getInputType()); } public void testFromMap_AutoTruncateIsSet() { var autoTruncate = true; - var taskSettingsMap = getTaskSettingsMap(autoTruncate); + var taskSettingsMap = getTaskSettingsMap(autoTruncate, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); } public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { - var taskSettings = getTaskSettingsMap("invalid"); + var taskSettings = getTaskSettingsMap("invalid", null); expectThrows(ValidationException.class, () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettings)); } public void testFromMap_AutoTruncateIsNull() { - var taskSettingsMap = getTaskSettingsMap(null); + var taskSettingsMap = getTaskSettingsMap(null, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); // needed, because of constructors being ambiguous otherwise Boolean nullBoolean = null; - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean, null))); } - public void testFromMap_DoesNotThrow_WithEmptyMap() { - assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + public void testFromMap_ReturnsFailure_WhenInputTypeIsInvalid() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [abc] received. 
[input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); + } + + public void testFromMap_ReturnsFailure_WhenInputTypeIsUnspecified() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [unspecified] received. [input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); } public void testOf_UseRequestSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); var requestAutoTruncate = originalAutoTruncate == false; - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(requestAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(requestAutoTruncate) + ); + } + + public void testOf_UseRequestSettings_AndRequestInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.SEARCH); + + var requestAutoTruncate = originalAutoTruncate == false; + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, InputType.INGEST).getInputType(), + is(InputType.INGEST) + ); } public void testOf_UseOriginalSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(originalAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); + } + + public void testOf_UseOriginalSettings_WithInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.INGEST); + + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); } public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ 
-107,7 +207,7 @@ public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { } public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ -117,6 +217,25 @@ public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws IOException { {}""")); } + public void testToXContent_WritesInputTypeIfNotNull() throws IOException { + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, InputType.INGEST)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input_type":"ingest","auto_truncate":true}""")); + } + + public void testToXContent_ThrowsAssertionFailure_WhenInputTypeIsUnspecified() { + var thrownException = expectThrows( + AssertionError.class, + () -> new GoogleVertexAiEmbeddingsTaskSettings(false, InputType.UNSPECIFIED) + ); + assertThat(thrownException.getMessage(), is("received invalid input type value [unspecified]")); + } + @Override protected Writeable.Reader instanceReader() { return GoogleVertexAiEmbeddingsTaskSettings::new; @@ -137,20 +256,37 @@ protected GoogleVertexAiEmbeddingsTaskSettings mutateInstanceForVersion( GoogleVertexAiEmbeddingsTaskSettings instance, TransportVersion version ) { + if (version.before(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + // default to null input type if node is on a version before input type was introduced + return new GoogleVertexAiEmbeddingsTaskSettings(instance.autoTruncate(), null); + } return instance; } private static GoogleVertexAiEmbeddingsTaskSettings createRandom() { - return new GoogleVertexAiEmbeddingsTaskSettings(randomFrom(new Boolean[] { null, randomBoolean() })); + var inputType = randomBoolean() ? 
randomWithoutUnspecified() : null; + var autoTruncate = randomFrom(new Boolean[] { null, randomBoolean() }); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); + } + + private static > String getValidValuesSortedAndCombined(EnumSet validValues) { + var validValuesAsStrings = validValues.stream().map(value -> value.toString().toLowerCase(Locale.ROOT)).toArray(String[]::new); + Arrays.sort(validValuesAsStrings); + + return String.join(", ", validValuesAsStrings); } - private static Map getTaskSettingsMap(@Nullable Object autoTruncate) { + public static Map getTaskSettingsMap(@Nullable Object autoTruncate, @Nullable InputType inputType) { var map = new HashMap(); if (autoTruncate != null) { map.put(AUTO_TRUNCATE, autoTruncate); } + if (inputType != null) { + map.put(INPUT_TYPE, inputType.toString()); + } + return map; } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java new file mode 100644 index 0000000000000..b6fc43e2a6e48 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java @@ -0,0 +1,286 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.greaterThan; + +public class CrossClusterEsqlRCS1UnavailableRemotesIT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(1) + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-esql") + 
.module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @Before + public void setupPreRequisites() throws IOException { + setupRolesAndPrivileges(); + loadData(); + } + + public void testEsqlRcs1UnavailableRemoteScenarios() throws Exception { + clusterShutDownWithRandomSkipUnavailable(); + remoteClusterShutdownWithSkipUnavailableTrue(); + remoteClusterShutdownWithSkipUnavailableFalse(); + } + + private void clusterShutDownWithRandomSkipUnavailable() throws Exception { + // skip_unavailable is set to a random boolean value. + // However, no clusters are stopped. Hence, we do not expect any other behaviour + // other than a 200-OK. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean()); + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + assertThat(columns.size(), is(4)); + assertThat(values.size(), is(9)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(2)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("skipped"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("successful")); + } + + @SuppressWarnings("unchecked") + private void remoteClusterShutdownWithSkipUnavailableTrue() throws Exception { + // Remote cluster is stopped and skip unavailable is set to true. + // We expect no exception and partial results from the remaining open cluster. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), true); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + // Assert results obtained from the local cluster and that remote cluster was + // skipped. 
+ assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + + assertThat(columns.size(), is(2)); + assertThat(values.size(), is(5)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(1)); + assertThat((int) clusters.get("skipped"), is(1)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("skipped")); + + } catch (ResponseException r) { + throw new AssertionError(r); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void remoteClusterShutdownWithSkipUnavailableFalse() throws Exception { + // Remote cluster is stopped and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), false); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + ResponseException ex = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(query))); + assertThat(ex.getMessage(), containsString("connect_transport_exception")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void loadData() throws IOException { + Request createIndex = new Request("PUT", "points"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "score": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "points" } } + { "id": 1, "score": 75} + { "index": { "_index": "points" } } + { "id": 2, "score": 125} + { "index": { "_index": "points" } } + { "id": 3, "score": 100} + { "index": { "_index": "points" } } + { "id": 4, "score": 50} + { "index": { "_index": "points" } } + { "id": 5, "score": 150} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", "squares"); + createIndex.setJsonEntity(""" + { + "mappings": { + 
"properties": { + "num": { "type": "integer" }, + "square": { "type": "integer" } + } + } + } + """); + assertOK(performRequestAgainstFulfillingCluster(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "squares"}} + { "num": 1, "square": 1 } + { "index": {"_index": "squares"}} + { "num": 2, "square": 4 } + { "index": {"_index": "squares"}} + { "num": 3, "square": 9 } + { "index": {"_index": "squares"}} + { "num": 4, "square": 16 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(Strings.toString(body)); + + return request; + } +}