diff --git a/docs/changelog/114193.yaml b/docs/changelog/114193.yaml new file mode 100644 index 0000000000000..f18f9359007b8 --- /dev/null +++ b/docs/changelog/114193.yaml @@ -0,0 +1,5 @@ +pr: 114193 +summary: Add postal_code support to the City and Enterprise databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114268.yaml b/docs/changelog/114268.yaml new file mode 100644 index 0000000000000..5e4457005d7d3 --- /dev/null +++ b/docs/changelog/114268.yaml @@ -0,0 +1,5 @@ +pr: 114268 +summary: Support more maxmind fields in the geoip processor +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114521.yaml b/docs/changelog/114521.yaml new file mode 100644 index 0000000000000..c3a9c7cdd0848 --- /dev/null +++ b/docs/changelog/114521.yaml @@ -0,0 +1,5 @@ +pr: 114521 +summary: Add support for registered country fields for maxmind geoip databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/116447.yaml b/docs/changelog/116447.yaml new file mode 100644 index 0000000000000..8c0cea4b54578 --- /dev/null +++ b/docs/changelog/116447.yaml @@ -0,0 +1,5 @@ +pr: 116447 +summary: Adding a deprecation info API warning for data streams with old indices +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/116583.yaml b/docs/changelog/116583.yaml new file mode 100644 index 0000000000000..3dc8337fe5b86 --- /dev/null +++ b/docs/changelog/116583.yaml @@ -0,0 +1,7 @@ +pr: 116583 +summary: Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions +area: ES|QL +type: bug +issues: + - 116529 + - 116544 diff --git a/docs/reference/datatiers.asciidoc b/docs/reference/datatiers.asciidoc index c37f54b5c9cae..65e029d876e6f 100644 --- a/docs/reference/datatiers.asciidoc +++ b/docs/reference/datatiers.asciidoc @@ -37,9 +37,8 @@ TIP: The performance of an {es} node is often limited by the performance of the For example hardware profiles, refer to Elastic Cloud's {cloud}/ec-reference-hardware.html[instance configurations]. Review our recommendations for optimizing your storage for <> and <>. -IMPORTANT: {es} generally expects nodes within a data tier to share the same -hardware profile. Variations not following this recommendation should be -carefully architected to avoid <>. +IMPORTANT: {es} assumes nodes within a data tier share the same hardware profile (such as CPU, RAM, disk capacity). +Data tiers with unequally resourced nodes have a higher risk of <>. The way data tiers are used often depends on the data's category: diff --git a/docs/reference/esql/functions/kibana/definition/repeat.json b/docs/reference/esql/functions/kibana/definition/repeat.json index b8660b6362e30..201484cf7aa6f 100644 --- a/docs/reference/esql/functions/kibana/definition/repeat.json +++ b/docs/reference/esql/functions/kibana/definition/repeat.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "ROW a = \"Hello!\"\n| EVAL triple_a = REPEAT(a, 3);" + "ROW a = \"Hello!\"\n| EVAL triple_a = REPEAT(a, 3)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/repeat.md b/docs/reference/esql/functions/kibana/docs/repeat.md index cc46e8282d9fe..4949d86a28f46 100644 --- a/docs/reference/esql/functions/kibana/docs/repeat.md +++ b/docs/reference/esql/functions/kibana/docs/repeat.md @@ -7,5 +7,5 @@ Returns a string constructed by concatenating `string` with itself the specified ``` ROW a = "Hello!" 
-| EVAL triple_a = REPEAT(a, 3);
+| EVAL triple_a = REPEAT(a, 3)
```
diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc
index 3a9ba58dedbf0..2eff56f87e826 100644
--- a/docs/reference/ingest/processors/geoip.asciidoc
+++ b/docs/reference/ingest/processors/geoip.asciidoc
@@ -51,10 +51,12 @@ field instead.
*Depends on what is available in `database_file`:
* If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`,
-`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`,
-and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`.
+`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`,
+`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`,
+`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`.
* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`,
-`country_iso_code`, `country_name`, `continent_code`, and `continent_name`. The fields actually added depend on what has been found
+`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`,
+`continent_code`, and `continent_name`. The fields actually added depend on what has been found
and which properties were configured in `properties`.
* If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, `asn`,
`organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured
@@ -70,10 +72,12 @@ The fields actually added depend on what has been found and which properties wer
`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`.
The fields actually added depend on what has been found and which properties were configured in `properties`.
* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`,
-`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`,
-`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`,
+`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`,
+`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`,
+`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`,
+`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`,
`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and
-`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`.
+`connection_type`.
The fields actually added depend on what has been found and which properties were configured in `properties`.

preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
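To make the `properties` selection concrete, here is a minimal sketch of a pipeline that keeps only a few of the fields listed above. The pipeline name is our own invention, and it assumes a City-capable database (such as the default GeoLite2 City) so that `postal_code` and the `registered_country_*` fields are available:

[source,console]
----
PUT _ingest/pipeline/geoip-postal-example
{
  "processors": [
    {
      "geoip": {
        "field": "ip",
        "properties": [
          "country_iso_code",
          "registered_country_iso_code",
          "postal_code",
          "location"
        ]
      }
    }
  ]
}
----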
diff --git a/docs/reference/search/search-your-data/retrievers-examples.asciidoc b/docs/reference/search/search-your-data/retrievers-examples.asciidoc
new file mode 100644
index 0000000000000..8cd1a4bf5ce98
--- /dev/null
+++ b/docs/reference/search/search-your-data/retrievers-examples.asciidoc
@@ -0,0 +1,428 @@
+[[retrievers-examples]]
+=== Retrievers examples
+
+Learn how to combine different retrievers in these hands-on examples.
+To demonstrate the full functionality of retrievers, these examples require access to a <> set up using the <>.
+
+[discrete]
+[[retrievers-examples-setup]]
+==== Add example data
+
+To begin with, we'll set up the necessary services for later use.
+
+[source,js]
+----
+// Setup rerank task stored as `my-rerank-model`
+PUT _inference/rerank/my-rerank-model
+{
+  "service": "cohere",
+  "service_settings": {
+    "model_id": "rerank-english-v3.0",
+    "api_key": "{{COHERE_API_KEY}}"
+  }
+}
+----
+//NOTCONSOLE
+
+Now that we have our reranking service in place, let's create the `retrievers_example` index and add some documents to it.
+
+[source,js]
+----
+PUT retrievers_example
+{
+  "mappings": {
+    "properties": {
+      "vector": {
+        "type": "dense_vector",
+        "dims": 3,
+        "similarity": "l2_norm",
+        "index": true
+      },
+      "text": {
+        "type": "text"
+      },
+      "year": {
+        "type": "integer"
+      },
+      "topic": {
+        "type": "keyword"
+      }
+    }
+  }
+}
+----
+//NOTCONSOLE
+
+[source,js]
+----
+POST /retrievers_example/_doc/1
+{
+  "vector": [0.23, 0.67, 0.89],
+  "text": "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.",
+  "year": 2024,
+  "topic": ["llm", "ai", "information_retrieval"]
+}
+
+POST /retrievers_example/_doc/2
+{
+  "vector": [0.12, 0.56, 0.78],
+  "text": "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.",
+  "year": 2023,
+  "topic": ["ai", "medicine"]
+}
+
+POST /retrievers_example/_doc/3
+{
+  "vector": [0.45, 0.32, 0.91],
+  "text": "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.",
+  "year": 2024,
+  "topic": ["ai", "security"]
+}
+
+POST /retrievers_example/_doc/4
+{
+  "vector": [0.34, 0.21, 0.98],
+  "text": "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.",
+  "year": 2023,
+  "topic": ["ai", "elastic", "assistant"]
+}
+
+POST /retrievers_example/_doc/5
+{
+  "vector": [0.11, 0.65, 0.47],
+  "text": "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.",
+  "year": 2024,
+  "topic": ["documentation", "observability", "elastic"]
+}
+
+----
+//NOTCONSOLE
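Before combining retrievers, it can help to sanity-check the setup with a single retriever. The following query is our own illustration (not part of the example set above): it wraps a plain `match` query in a `standard` retriever and returns the documents whose `text` mentions either query term:

[source,js]
----
GET /retrievers_example/_search
{
  "retriever": {
    "standard": {
      "query": {
        "match": {
          "text": "artificial intelligence"
        }
      }
    }
  },
  "_source": ["text", "topic"]
}
----
//NOTCONSOLE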
+Now that we also have our documents in place, let's try to run some queries using retrievers.
+
+[discrete]
+[[retrievers-examples-combining-standard-knn-retrievers-with-rrf]]
+==== Example: Combining query and kNN with RRF
+
+First, let's examine how to combine two different types of queries: a `kNN` query and a
+`query_string` query. While these queries may produce scores in different ranges, we can use
+Reciprocal Rank Fusion (`rrf`) to combine the results and generate a merged final result
+list.
+
+To implement this in the retriever framework, we start with the top-level element: our `rrf`
+retriever. This retriever operates on top of two other retrievers: a `knn` retriever and a
+`standard` retriever. Our query structure would look like this:
+
+[source,js]
+----
+GET /retrievers_example/_search
+{
+  "retriever":{
+    "rrf": {
+      "retrievers":[
+        {
+          "standard":{
+            "query":{
+              "query_string":{
+                "query": "(information retrieval) OR (artificial intelligence)",
+                "default_field": "text"
+              }
+            }
+          }
+        },
+        {
+          "knn": {
+            "field": "vector",
+            "query_vector": [
+              0.23,
+              0.67,
+              0.89
+            ],
+            "k": 3,
+            "num_candidates": 5
+          }
+        }
+      ],
+      "rank_window_size": 10,
+      "rank_constant": 1
+    }
+  },
+  "_source": ["text", "topic"]
+}
+----
+//NOTCONSOLE
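For intuition about how `rrf` merges the two result lists: reciprocal rank fusion scores each document as the sum of `1 / (rank_constant + rank)` over the child retrievers that return it. With `rank_constant: 1` as above, a document ranked 2nd by the `query_string` retriever and 1st by the `knn` retriever would score `1/(1+2) + 1/(1+1) ≈ 0.83`, placing it above a document that appears in only one list. (This is a worked illustration of the standard RRF formula, not output copied from {es}.)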
+
+[discrete]
+[[retrievers-examples-collapsing-retriever-results]]
+==== Example: Grouping results by year with `collapse`
+
+In our result set, we have many documents with the same `year` value. We can clean this
+up using the `collapse` parameter with our retriever. This enables grouping results by
+any field and returns only the highest-scoring document from each group. In this example,
+we'll collapse our results based on the `year` field.
+
+[source,js]
+----
+GET /retrievers_example/_search
+{
+  "retriever":{
+    "rrf": {
+      "retrievers":[
+        {
+          "standard":{
+            "query":{
+              "query_string":{
+                "query": "(information retrieval) OR (artificial intelligence)",
+                "default_field": "text"
+              }
+            }
+          }
+        },
+        {
+          "knn": {
+            "field": "vector",
+            "query_vector": [
+              0.23,
+              0.67,
+              0.89
+            ],
+            "k": 3,
+            "num_candidates": 5
+          }
+        }
+      ],
+      "rank_window_size": 10,
+      "rank_constant": 1
+    }
+  },
+  "collapse": {
+    "field": "year",
+    "inner_hits": {
+      "name": "topic related documents",
+      "_source": ["text", "year"]
+    }
+  },
+  "_source": ["text", "topic"]
+}
+----
+//NOTCONSOLE
+
+[discrete]
+[[retrievers-examples-text-similarity-reranker-on-top-of-rrf]]
+==== Example: Rerank results of an RRF retriever
+
+Because retrievers support full composability, we can also rerank the results of an
+`rrf` retriever with a `text_similarity_reranker`. Let's apply this to our first example.
+
+[source,js]
+----
+GET retrievers_example/_search
+{
+  "retriever": {
+    "text_similarity_reranker": {
+      "retriever": {
+        "rrf": {
+          "retrievers": [
+            {
+              "standard":{
+                "query":{
+                  "query_string":{
+                    "query": "(information retrieval) OR (artificial intelligence)",
+                    "default_field": "text"
+                  }
+                }
+              }
+            },
+            {
+              "knn": {
+                "field": "vector",
+                "query_vector": [
+                  0.23,
+                  0.67,
+                  0.89
+                ],
+                "k": 3,
+                "num_candidates": 5
+              }
+            }
+          ],
+          "rank_window_size": 10,
+          "rank_constant": 1
+        }
+      },
+      "field": "text",
+      "inference_id": "my-rerank-model",
+      "inference_text": "What are the state of the art applications of AI in information retrieval?"
+    }
+  },
+  "_source": ["text", "topic"]
+}
+
+----
+//NOTCONSOLE
+
+[discrete]
+[[retrievers-examples-rrf-ranking-on-text-similarity-reranker-results]]
+==== Example: RRF with semantic reranker
+
+For this example, we'll use the `my-rerank-model` reranker we previously configured inside a
+`text_similarity_reranker` that `rrf` combines with a `knn` retriever. Since this is a reranker, it needs an initial pool of
+documents to work with. In this case, we'll filter for documents about `ai` topics.
+
+[source,js]
+----
+GET /retrievers_example/_search
+{
+  "retriever": {
+    "rrf": {
+      "retrievers": [
+        {
+          "knn": {
+            "field": "vector",
+            "query_vector": [
+              0.23,
+              0.67,
+              0.89
+            ],
+            "k": 3,
+            "num_candidates": 5
+          }
+        },
+        {
+          "text_similarity_reranker": {
+            "retriever": {
+              "standard": {
+                "query": {
+                  "term": {
+                    "topic": "ai"
+                  }
+                }
+              }
+            },
+            "field": "text",
+            "inference_id": "my-rerank-model",
+            "inference_text": "Can I use generative AI to identify user intent and improve search relevance?"
+          }
+        }
+      ],
+      "rank_window_size": 10,
+      "rank_constant": 1
+    }
+  },
+  "_source": [
+    "text",
+    "topic"
+  ]
+}
+----
+//NOTCONSOLE
+
+[discrete]
+[[retrievers-examples-chaining-text-similarity-reranker-retrievers]]
+==== Example: Chaining multiple semantic rerankers
+
+Full composability means we can chain together multiple retrievers of the same type. For instance, imagine we have a computationally expensive reranker that's specialized for AI content. We can rerank the results of a `text_similarity_reranker` using another `text_similarity_reranker` retriever. Each reranker can operate on different fields and/or use different inference services.
+
+[source,js]
+----
+GET retrievers_example/_search
+{
+  "retriever": {
+    "text_similarity_reranker": {
+      "retriever": {
+        "text_similarity_reranker": {
+          "retriever": {
+            "knn": {
+              "field": "vector",
+              "query_vector": [
+                0.23,
+                0.67,
+                0.89
+              ],
+              "k": 3,
+              "num_candidates": 5
+            }
+          },
+          "rank_window_size": 100,
+          "field": "text",
+          "inference_id": "my-rerank-model",
+          "inference_text": "What are the state of the art applications of AI in information retrieval?"
+        }
+      },
+      "rank_window_size": 10,
+      "field": "text",
+      "inference_id": "my-other-more-expensive-rerank-model",
+      "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction"
+    }
+  },
+  "_source": [
+    "text",
+    "topic"
+  ]
+}
+----
+//NOTCONSOLE
+
+
+Note that our example applies two reranking steps. First, we rerank the top 100
+documents from the `knn` search using the `my-rerank-model` reranker. Then we
+pick the top 10 results and rerank them using the more fine-grained
+`my-other-more-expensive-rerank-model`.
+
+[discrete]
+[[retrievers-examples-rrf-and-aggregations]]
+==== Example: Combine RRF with aggregations
+
+Retrievers support both composability and most of the standard `_search` functionality. For instance,
+we can compute aggregations with the `rrf` retriever. When using a compound retriever,
+the aggregations are computed based on its nested retrievers. In the following example,
+the `terms` aggregation for the `topic` field will include all results, not just the top `rank_window_size`,
+from the two nested retrievers, i.e. all documents whose `year` field is greater than 2023 or whose `topic` field
+matches the term `elastic`.
+
+[source,js]
+----
+GET retrievers_example/_search
+{
+  "retriever": {
+    "rrf": {
+      "retrievers": [
+        {
+          "standard": {
+            "query": {
+              "range": {
+                "year": {
+                  "gt": 2023
+                }
+              }
+            }
+          }
+        },
+        {
+          "standard": {
+            "query": {
+              "term": {
+                "topic": "elastic"
+              }
+            }
+          }
+        }
+      ],
+      "rank_window_size": 10,
+      "rank_constant": 1
+    }
+  },
+  "_source": [
+    "text",
+    "topic"
+  ],
+  "aggs": {
+    "topics": {
+      "terms": {
+        "field": "topic"
+      }
+    }
+  }
+}
+----
+//NOTCONSOLE
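If only the aggregation itself is of interest, the hit list can be suppressed. As a small variation of our own (assuming the standard top-level `size` parameter applies here, as `aggs` does), setting `size` to `0` returns just the `topics` buckets:

[source,js]
----
GET retrievers_example/_search
{
  "size": 0,
  "retriever": {
    "rrf": {
      "retrievers": [
        { "standard": { "query": { "range": { "year": { "gt": 2023 } } } } },
        { "standard": { "query": { "term": { "topic": "elastic" } } } }
      ],
      "rank_window_size": 10,
      "rank_constant": 1
    }
  },
  "aggs": {
    "topics": {
      "terms": {
        "field": "topic"
      }
    }
  }
}
----
//NOTCONSOLE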
diff --git a/docs/reference/search/search-your-data/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-overview.asciidoc
index 8e5955fc41782..1771b5bb0d849 100644
--- a/docs/reference/search/search-your-data/retrievers-overview.asciidoc
+++ b/docs/reference/search/search-your-data/retrievers-overview.asciidoc
@@ -1,5 +1,5 @@
[[retrievers-overview]]
-=== Retrievers
+== Retrievers

A retriever is an abstraction that was added to the Search API in *8.14.0* and was made generally available in *8.16.0*.
This abstraction enables the configuration of multi-stage retrieval pipelines within a single `_search` call.
@@ -11,7 +11,7 @@ For implementation details, including notable restrictions, check out the

[discrete]
[[retrievers-overview-types]]
-==== Retriever types
+=== Retriever types

Retrievers come in various types, each tailored for different search operations.
The following retrievers are currently available:
@@ -34,7 +34,8 @@ Used for <>.
Requires first creating a `rerank` task using the <>.

[discrete]
-==== What makes retrievers useful?
+[[retrievers-overview-why-are-they-useful]]
+=== What makes retrievers useful?

Here's an overview of what makes retrievers useful and how they differ from regular queries.
@@ -66,65 +67,90 @@ When using compound retrievers, only the query element is allowed, which enforce

[discrete]
[[retrievers-overview-example]]
-==== Example
+=== Example

-The following example demonstrates the powerful queries that we can now compose, and how retrievers simplify this process.
-We can use any combination of retrievers we want, propagating the results of a nested retriever to its parent.
-In this scenario, we'll make use of 4 of our currently available retrievers, i.e. `standard`, `knn`, `text_similarity_reranker` and `rrf`.
-See <> for the complete list of available retrievers.
-
-We'll first combine the results of a `semantic` query using the `standard` retriever, and that of a `knn` search on a dense vector field, using `rrf` to get the top 100 results.
-Finally, we'll then rerank the top-50 results of `rrf` using the `text_similarity_reranker`
+The following example demonstrates how using retrievers simplifies the composability of queries for RRF ranking.

[source,js]
----
GET example-index/_search
{
  "retriever": {
-    "text_similarity_reranker": {
-      "retriever": {
-        "rrf": {
-          "retrievers": [
-            {
-              "standard": {
-                "query": {
-                  "semantic": {
-                    "field": "inference_field",
-                    "query": "state of the art vector database"
-                  }
-                }
-              }
-            },
-            {
-              "knn": {
-                "query_vector": [
-                  0.54,
-                  ...,
-                  0.245
-                ],
-                "field": "embedding",
-                "k": 10,
-                "num_candidates": 15
+    "rrf": {
+      "retrievers": [
+        {
+          "standard": {
+            "query": {
+              "sparse_vector": {
+                "field": "vector.tokens",
+                "inference_id": "my-elser-endpoint",
+                "query": "What blue shoes are on sale?"
+              }
+            }
+          }
+        },
+        {
+          "standard": {
+            "query": {
+              "match": {
+                "text": "blue shoes sale"
              }
            }
-          ],
-          "rank_window_size": 100,
-          "rank_constant": 10
+          }
        }
-      },
-      "rank_window_size": 50,
-      "field": "description",
-      "inference_text": "what's the best way to create complex pipelines and retrieve documents?",
-      "inference_id": "my-awesome-rerank-model"
+      ]
    }
  }
}
----
//NOTCONSOLE

+This example demonstrates how you can combine different retrieval strategies into a single `retriever` pipeline.
+
+Compare this to the `sub_searches` approach with RRF ranking (which is deprecated as of 8.16.0):
+
+.*Expand* for example
+[%collapsible]
+==============
+
+[source,js]
+----
+GET example-index/_search
+{
+  "sub_searches":[
+    {
+      "query":{
+        "match":{
+          "text":"blue shoes sale"
+        }
+      }
+    },
+    {
+      "query":{
+        "sparse_vector": {
+          "field": "vector.tokens",
+          "inference_id": "my-elser-endpoint",
+          "query": "What blue shoes are on sale?"
+        }
+      }
+    }
+  ],
+  "rank":{
+    "rrf":{
+      "rank_window_size":50,
+      "rank_constant":20
+    }
+  }
+}
+----
+//NOTCONSOLE
+==============
+
+For more examples on how to use retrievers, please refer to <>.
+
[discrete]
[[retrievers-overview-glossary]]
-==== Glossary
+=== Glossary

Here are some important terms:
@@ -143,7 +169,7 @@ Special compound retrievers that reorder hits and may adjust the number of hits,

[discrete]
[[retrievers-overview-play-in-search]]
-==== Retrievers in action
+=== Retrievers in action

The Search Playground builds Elasticsearch queries using the retriever abstraction.
It automatically detects the fields and types in your index and builds a retriever tree based on your selections.
@@ -154,6 +180,9 @@ Refer to the {kibana-ref}/playground.html[Playground documentation] for more inf

[discrete]
[[retrievers-overview-api-reference]]
-==== API reference
+=== API reference

For implementation details, including notable restrictions, check out the
<> in the Search API docs.
+
+
+include::retrievers-examples.asciidoc[]
diff --git a/docs/reference/search/search-your-data/search-api.asciidoc b/docs/reference/search/search-your-data/search-api.asciidoc
index 13cea537ea4fb..a9e74d54dd9d9 100644
--- a/docs/reference/search/search-your-data/search-api.asciidoc
+++ b/docs/reference/search/search-your-data/search-api.asciidoc
@@ -530,5 +530,4 @@ include::retrieve-inner-hits.asciidoc[]
include::search-shard-routing.asciidoc[]
include::search-using-query-rules.asciidoc[]
include::search-template.asciidoc[]
-include::retrievers-overview.asciidoc[]
diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc
index cd2b418a7e79b..82541412db4bd 100644
--- a/docs/reference/search/search-your-data/search-your-data.asciidoc
+++ b/docs/reference/search/search-your-data/search-your-data.asciidoc
@@ -43,6 +43,7 @@ DSL, with a simplified user experience.
Create search applications based on your results directly in the Kibana Search UI.
include::search-api.asciidoc[] +include::retrievers-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] include::search-across-clusters.asciidoc[] diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index c283f9fd93957..c4cdacd135cb4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Map getKey() { - return TimeSeriesIdFieldMapper.decodeTsidAsMap(key); + return RoutingPathFields.decodeAsMap(key); } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index a2fa617ed902b..c74637330dd7a 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -161,11 +162,11 @@ public void collect(int doc, long bucket) throws IOException { if (currentTsidOrd == aggCtx.getTsidHashOrd()) { tsid = currentTsid; } else { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (TsidConsumer consumer : dimensionConsumers.values()) { - consumer.accept(doc, tsidBuilder); + consumer.accept(doc, routingPathFields); } - currentTsid = tsid = tsidBuilder.buildLegacyTsid().toBytesRef(); + currentTsid = tsid = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } long bucketOrdinal = bucketOrds.add(bucket, tsid); if (bucketOrdinal < 0) { // already seen @@ -189,6 +190,6 @@ InternalTimeSeries buildResult(InternalTimeSeries.InternalBucket[] topBuckets) { @FunctionalInterface interface TsidConsumer { - void accept(int docId, TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder) throws IOException; + void accept(int docId, RoutingPathFields routingFields) throws IOException; } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index be841da07ada9..e61c02e0b9cd2 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregationReduceContext; @@ -42,12 +43,12 @@ private List randomBuckets(boolean keyed, InternalAggregations a List> keys = randomKeys(bucketKeys(randomIntBetween(1, 4)), numberOfBuckets); for (int j = 0; j < numberOfBuckets; j++) { long docCount = randomLongBetween(0, Long.MAX_VALUE / (20L * numberOfBuckets)); - var builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingPathFields = new RoutingPathFields(null); for (var entry : keys.get(j).entrySet()) { - builder.addString(entry.getKey(), (String) entry.getValue()); + routingPathFields.addString(entry.getKey(), (String) entry.getValue()); } try { - var key = builder.buildLegacyTsid().toBytesRef(); + var key = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); bucketList.add(new InternalBucket(key, docCount, aggregations, keyed)); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index 26611127a94df..d9a4023457126 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -93,10 +93,10 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1] instanceof Number n) { - builder.addLong(dimensions[i].toString(), n.longValue()); + routingPathFields.addLong(dimensions[i].toString(), 
n.longValue()); if (dimensions[i + 1] instanceof Integer || dimensions[i + 1] instanceof Long) { fields.add(new NumericDocValuesField(dimensions[i].toString(), ((Number) dimensions[i + 1]).longValue())); } else if (dimensions[i + 1] instanceof Float) { @@ -105,7 +105,7 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(dimensions[i].toString(), (double) dimensions[i + 1])); } } else { - builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -118,7 +118,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); } } - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); } diff --git a/muted-tests.yml b/muted-tests.yml index 1b768222f8bae..60776e9f73868 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -5,9 +5,6 @@ tests: - class: "org.elasticsearch.client.RestClientSingleHostIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/102717" method: "testRequestResetAndAbort" -- class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" - issue: "https://github.com/elastic/elasticsearch/issues/110408" - method: "testCreateAndRestorePartialSearchableSnapshot" - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testSingleDoc {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111434 @@ -127,18 +124,9 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoHash - issue: https://github.com/elastic/elasticsearch/issues/115664 - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testSupportedStream issue: https://github.com/elastic/elasticsearch/issues/113430 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoTile - issue: https://github.com/elastic/elasticsearch/issues/115717 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoHex - issue: https://github.com/elastic/elasticsearch/issues/115705 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 @@ -157,32 +145,14 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/115970 -- class: org.elasticsearch.search.slice.SearchSliceIT - method: testPointInTime - issue: https://github.com/elastic/elasticsearch/issues/115988 -- class: org.elasticsearch.action.search.PointInTimeIT - method: testPITTiebreak - issue: https://github.com/elastic/elasticsearch/issues/115810 - class: 
org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 - class: org.elasticsearch.search.query.SearchQueryIT method: testAllDocsQueryString issue: https://github.com/elastic/elasticsearch/issues/115728 -- class: org.elasticsearch.search.basic.SearchWithRandomExceptionsIT - method: testRandomExceptions - issue: https://github.com/elastic/elasticsearch/issues/116027 -- class: org.elasticsearch.action.admin.HotThreadsIT - method: testHotThreadsDontFail - issue: https://github.com/elastic/elasticsearch/issues/115754 -- class: org.elasticsearch.search.functionscore.QueryRescorerIT - method: testScoring - issue: https://github.com/elastic/elasticsearch/issues/116050 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT - method: testPinnedPromotions - issue: https://github.com/elastic/elasticsearch/issues/116097 - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=cat.shards/10_basic/Help} issue: https://github.com/elastic/elasticsearch/issues/116110 @@ -195,9 +165,6 @@ tests: - class: org.elasticsearch.upgrades.FullClusterRestartIT method: testSnapshotRestore {cluster=OLD} issue: https://github.com/elastic/elasticsearch/issues/111777 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoPointGeoTile - issue: https://github.com/elastic/elasticsearch/issues/115818 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 @@ -261,9 +228,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/116373 -- class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsIntegTests - method: testCreateAndRestoreSearchableSnapshot - issue: https://github.com/elastic/elasticsearch/issues/116377 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 @@ -293,6 +257,15 @@ tests: - class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange issue: https://github.com/elastic/elasticsearch/issues/116523 +- class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT + method: testMatchAllQuery + issue: https://github.com/elastic/elasticsearch/issues/116536 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} + issue: https://github.com/elastic/elasticsearch/issues/116555 +- class: org.elasticsearch.smoketest.MlWithSecurityIT + method: test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} + issue: https://github.com/elastic/elasticsearch/issues/116608 # Examples: # diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java index 37fbc95d56506..84abb57b7821e 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.util.ArrayList; import java.util.List; @@ -28,7 +27,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -42,7 +41,6 @@ public class IndexActionIT extends ESIntegTestCase { public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { - Exception firstError = null; createIndex("test"); int numOfDocs = randomIntBetween(10, 100); logger.info("indexing [{}] docs", numOfDocs); @@ -52,51 +50,9 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { } indexRandom(true, builders); logger.info("verifying indexed content"); - int numOfChecks = randomIntBetween(8, 12); + int numOfChecks = randomIntBetween(16, 24); for (int j = 0; j < numOfChecks; j++) { - try { - logger.debug("running search with all types"); - assertResponse(prepareSearch("test"), response -> { - if (response.getHits().getTotalHits().value() != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. search response: \n{}", message, response); - fail(message); - } - }); - } catch (Exception e) { - logger.error("search for all docs types failed", e); - if (firstError == null) { - firstError = e; - } - } - try { - logger.debug("running search with a specific type"); - assertResponse(prepareSearch("test"), response -> { - if (response.getHits().getTotalHits().value() != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. 
search response: \n{}", message, response); - fail(message); - } - }); - } catch (Exception e) { - logger.error("search for all docs of a specific type failed", e); - if (firstError == null) { - firstError = e; - } - } - } - if (firstError != null) { - fail(firstError.getMessage()); + assertHitCount(prepareSearch("test"), numOfDocs); } internalCluster().wipeIndices("test"); } @@ -147,16 +103,13 @@ public void testCreatedFlagParallelExecution() throws Exception { List> tasks = new ArrayList<>(taskCount); final Random random = random(); for (int i = 0; i < taskCount; i++) { - tasks.add(new Callable() { - @Override - public Void call() throws Exception { - int docId = random.nextInt(docCount); - DocWriteResponse indexResponse = indexDoc("test", Integer.toString(docId), "field1", "value"); - if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) { - createdCounts.incrementAndGet(docId); - } - return null; + tasks.add(() -> { + int docId = random.nextInt(docCount); + DocWriteResponse indexResponse = indexDoc("test", Integer.toString(docId), "field1", "value"); + if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) { + createdCounts.incrementAndGet(docId); } + return null; }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 45e370a2e2252..8e0dee2396411 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.reservedstate.service; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction; @@ -26,16 +27,12 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.ByteArrayInputStream; import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -54,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class ComponentTemplatesFileSettingsIT extends ESIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -365,15 +363,7 @@ private void assertMasterNode(Client client, String node) throws ExecutionExcept } private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = 
createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet()); } private Tuple setupClusterStateListener(String node) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index f9122ccfb4a3e..90326abb381d0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.reservedstate.service; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -20,6 +21,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; @@ -27,7 +29,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.Before; -import java.nio.charset.StandardCharsets; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -50,6 +52,7 @@ import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class FileSettingsServiceIT extends ESIntegTestCase { private final AtomicLong versionCounter = new AtomicLong(1); @@ -129,29 +132,37 @@ private void assertMasterNode(Client client, String node) { ); } - public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger, boolean incrementVersion) - throws Exception { - long version = incrementVersion ? 
versionCounter.incrementAndGet() : versionCounter.get(); - + public static void writeJSONFile(String node, String json, Logger logger, Long version) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); Files.createDirectories(fileSettingsService.watchedFileDir()); Path tempFilePath = createTempFile(); - String settingsFileContent = Strings.format(json, version); - Files.write(tempFilePath, settingsFileContent.getBytes(StandardCharsets.UTF_8)); - logger.info("--> Before writing new settings file with version [{}]", version); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - logger.info("--> After writing new settings file: [{}]", settingsFileContent); - } - - public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger) throws Exception { - writeJSONFile(node, json, versionCounter, logger, true); + String jsonWithVersion = Strings.format(json, version); + logger.info("--> before writing JSON config to node {} with path {}", node, tempFilePath); + logger.info(jsonWithVersion); + + Files.writeString(tempFilePath, jsonWithVersion); + int retryCount = 0; + do { + try { + // this can fail on Windows because of timing + Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + return; + } catch (IOException e) { + logger.info("--> retrying writing a settings file [{}]", retryCount); + if (retryCount == 4) { // retry 5 times + throw e; + } + Thread.sleep(retryDelay(retryCount)); + retryCount++; + } + } while (true); } - public static void writeJSONFileWithoutVersionIncrement(String node, String json, AtomicLong versionCounter, Logger logger) - throws Exception { - writeJSONFile(node, json, versionCounter, logger, false); + private static long retryDelay(int retryCount) { + return 100 * (1 << retryCount) + Randomness.get().nextInt(10); } private Tuple setupCleanupClusterStateListener(String node) { @@ -245,7 +256,7 @@ public void testSettingsApplied() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); } @@ -260,7 +271,7 @@ public void testSettingsAppliedOnStart() throws Exception { // In internal cluster tests, the nodes share the config directory, so when we write with the data node path // the master will pick it up on start - writeJSONFile(dataNode, testJSON, versionCounter, logger); + writeJSONFile(dataNode, testJSON, logger, versionCounter.incrementAndGet()); logger.info("--> start master node"); final String masterNode = internalCluster().startMasterOnlyNode(); @@ -288,7 +299,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { assertBusy(() -> assertTrue(masterFileSettingsService.watching())); logger.info("--> write some settings"); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); logger.info("--> restart master"); @@ -366,7 +377,7 @@ public void testErrorSaved() throws Exception { 
assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); assertClusterStateNotSaved(savedClusterState.v1(), savedClusterState.v2()); } @@ -390,14 +401,14 @@ public void testErrorCanRecoverOnRestart() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); AtomicLong metadataVersion = savedClusterState.v2(); assertClusterStateNotSaved(savedClusterState.v1(), metadataVersion); assertHasErrors(metadataVersion, "not_cluster_settings"); // write valid json without version increment to simulate ES being able to process settings after a restart (usually, this would be // due to a code change) - writeJSONFileWithoutVersionIncrement(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.get()); internalCluster().restartNode(masterNode); ensureGreen(); @@ -426,14 +437,14 @@ public void testNewErrorOnRestartReprocessing() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); AtomicLong metadataVersion = savedClusterState.v2(); assertClusterStateNotSaved(savedClusterState.v1(), metadataVersion); assertHasErrors(metadataVersion, "not_cluster_settings"); // write json with new error without version increment to simulate ES failing to process settings after a restart for a new reason // (usually, this would be due to a code change) - writeJSONFileWithoutVersionIncrement(masterNode, testOtherErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testOtherErrorJSON, logger, versionCounter.get()); assertHasErrors(metadataVersion, "not_cluster_settings"); internalCluster().restartNode(masterNode); ensureGreen(); @@ -461,7 +472,7 @@ public void testSettingsAppliedOnMasterReElection() throws Exception { assertTrue(masterFileSettingsService.watching()); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); internalCluster().stopCurrentMasterNode(); @@ -476,13 +487,13 @@ public void testSettingsAppliedOnMasterReElection() throws Exception { ensureStableCluster(3); savedClusterState = setupCleanupClusterStateListener(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), testCleanupJSON, versionCounter, logger); + writeJSONFile(internalCluster().getMasterName(), testCleanupJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); savedClusterState = setupClusterStateListener(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), testJSON43mb, versionCounter, logger); + writeJSONFile(internalCluster().getMasterName(), testJSON43mb, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "43mb"); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 54ba74a62890d..7b284979611e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.reservedstate.service; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -22,7 +23,6 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.test.ESIntegTestCase; @@ -30,9 +30,6 @@ import java.io.ByteArrayInputStream; import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -49,6 +46,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class RepositoriesFileSettingsIT extends ESIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -102,15 +100,7 @@ private void assertMasterNode(Client client, String node) throws ExecutionExcept } private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet()); } private Tuple setupClusterStateListener(String node) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index 7e13402b7e66a..7d47ed391199c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -19,9 +19,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -29,11 +27,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.junit.After; -import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -78,34 +72,8 @@ public void cleanUp() throws Exception { awaitNoMoreRunningOperations(); } - private long retryDelay(int retryCount) { - return 100 * (1 << retryCount) + Randomness.get().nextInt(10); - } - private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - int retryCount = 0; - do { - try { - // this can fail on Windows because of timing - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - return; - } catch (IOException e) { - logger.info("--> retrying writing a settings file [" + retryCount + "]"); - if (retryCount == 4) { // retry 5 times - throw e; - } - Thread.sleep(retryDelay(retryCount)); - retryCount++; - } - } while (true); + FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet()); } private Tuple setupClusterStateListener(String node) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 3134eb4966115..5f3b466f9f7bd 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -175,6 +175,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); @@ -190,6 +191,8 @@ static TransportVersion def(int id) { public static final TransportVersion LOGSDB_TELEMETRY_STATS = def(8_785_00_0); public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); + public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 71e3185329ed3..a7d92682b763c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -15,6 +15,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -22,6 +23,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsInfo; @@ -42,7 +44,7 @@ public class NodeInfo extends BaseNodeResponse { private final String version; - private final TransportVersion transportVersion; + private final CompatibilityVersions compatibilityVersions; private final IndexVersion indexVersion; private final Map componentVersions; private final Build build; @@ -64,15 +66,20 @@ public NodeInfo(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { version = in.readString(); - transportVersion = TransportVersion.readVersion(in); + if (in.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || in.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions = CompatibilityVersions.readVersion(in); + } else { + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions + } indexVersion = IndexVersion.readVersion(in); } else { Version legacyVersion = Version.readVersion(in); version = legacyVersion.toString(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - transportVersion = TransportVersion.readVersion(in); + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions } else { - transportVersion = TransportVersion.fromId(legacyVersion.id); + compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(legacyVersion.id), Map.of()); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { indexVersion = IndexVersion.readVersion(in); @@ -114,7 +121,7 @@ public NodeInfo(StreamInput in) throws IOException { public NodeInfo( String version, - TransportVersion transportVersion, + CompatibilityVersions compatibilityVersions, IndexVersion indexVersion, Map componentVersions, Build build, @@ -134,7 +141,7 @@ public NodeInfo( ) { super(node); this.version = version; - this.transportVersion = transportVersion; + this.compatibilityVersions = compatibilityVersions; this.indexVersion = indexVersion; this.componentVersions = componentVersions; this.build = build; @@ -171,7 +178,7 @@ public String getVersion() { * The most recent transport version that can be used by this node */ public TransportVersion getTransportVersion() { - return transportVersion; + return 
compatibilityVersions.transportVersion(); } /** @@ -188,6 +195,13 @@ public Map getComponentVersions() { return componentVersions; } + /** + * A map of system index names to versions for their mappings supported by this node. + */ + public Map getCompatibilityVersions() { + return compatibilityVersions.systemIndexMappingsVersion(); + } + /** * The build version of the node. */ @@ -240,8 +254,11 @@ public void writeTo(StreamOutput out) throws IOException { } else { Version.writeVersion(Version.fromString(version), out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - TransportVersion.writeVersion(transportVersion, out); + if (out.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || out.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions.writeTo(out); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + TransportVersion.writeVersion(compatibilityVersions.transportVersion(), out); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { IndexVersion.writeVersion(indexVersion, out); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 010f96f212116..eee65134eae33 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; @@ -257,16 +256,17 @@ protected void executePhaseOnShard( SearchShardTarget shard, SearchActionListener phaseListener ) { - final ShardOpenReaderRequest shardRequest = new ShardOpenReaderRequest( - shardIt.shardId(), - shardIt.getOriginalIndices(), - pitRequest.keepAlive() - ); - Transport.Connection connection = connectionLookup.apply(shardIt.getClusterAlias(), shard.getNodeId()); + final Transport.Connection connection; + try { + connection = connectionLookup.apply(shardIt.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + phaseListener.onFailure(e); + return; + } transportService.sendChildRequest( connection, OPEN_SHARD_READER_CONTEXT_NAME, - shardRequest, + new ShardOpenReaderRequest(shardIt.shardId(), shardIt.getOriginalIndices(), pitRequest.keepAlive()), task, new ActionListenerResponseHandler<>( phaseListener, @@ -279,29 +279,9 @@ protected void executePhaseOnShard( @Override protected SearchPhase getNextPhase() { return new SearchPhase(getName()) { - - private void onExecuteFailure(Exception e) { - onPhaseFailure(this, "sending response failed", e); - } - @Override public void run() { - execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - onExecuteFailure(e); - } - - @Override - protected void doRun() { - sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); - } - - @Override - public boolean isForceExecution() { - return true; // we already created the PIT, no sense in rejecting the task that sends the 
response. - } - }); + sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); } }; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 39499253c8790..bf80c38d64a4e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -402,8 +402,10 @@ Index[] concreteIndices(Context context, String... indexExpressions) { resolveIndicesForDataStream(context, dataStream, concreteIndicesResult); } } else { - for (Index index : indexAbstraction.getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + List indices = indexAbstraction.getIndices(); + for (int i = 0, n = indices.size(); i < n; i++) { + Index index = indices.get(i); + if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } } @@ -421,7 +423,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { private static void resolveIndicesForDataStream(Context context, DataStream dataStream, Set concreteIndicesResult) { if (shouldIncludeRegularIndices(context.getOptions())) { for (Index index : dataStream.getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } } @@ -430,7 +432,7 @@ private static void resolveIndicesForDataStream(Context context, DataStream data // We short-circuit here, if failure indices are not allowed and they can be skipped if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { for (Index index : dataStream.getFailureIndices().getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } } @@ -565,7 +567,7 @@ private static IndexNotFoundException notFoundException(String... indexExpressio return infe; } - private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions options, Index index) { + private static boolean shouldTrackConcreteIndex(Context context, Index index) { if (context.systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY && context.netNewSystemIndexPredicate.test(index.getName())) { // Exclude this one as it's a net-new system index, and we explicitly don't want those. 
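The hunk above and the one that follows make two changes to `IndexNameExpressionResolver`: `shouldTrackConcreteIndex` loses its redundant `IndicesOptions` parameter (the options are reachable through `context`), and the enhanced for-loop becomes an indexed loop over the returned `List`. A minimal, self-contained sketch of that indexed-loop pattern follows; the class and data are hypothetical stand-ins, and reading the loop change as an iterator-allocation saving on a hot path is an assumption from the shape of the diff, not something it states.

```
import java.util.List;

// Hypothetical stand-in for the resolver loop above: iterating a
// random-access list by index, with size() hoisted into the loop header,
// avoids allocating an Iterator on every call.
final class IndexedLoopSketch {
    static int countMatching(List<String> indices, String prefix) {
        int matches = 0;
        for (int i = 0, n = indices.size(); i < n; i++) {
            if (indices.get(i).startsWith(prefix)) {
                matches++;
            }
        }
        return matches;
    }

    public static void main(String[] args) {
        System.out.println(countMatching(List.of(".ds-logs-000001", "metrics-1"), ".ds-")); // prints 1
    }
}
```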
@@ -575,7 +577,7 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (options.ignoreUnavailable()) { + if (context.options.ignoreUnavailable()) { return false; } else { throw new FailureIndexNotSupportedException(index); @@ -585,6 +587,7 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions } final IndexMetadata imd = context.state.metadata().index(index); if (imd.getState() == IndexMetadata.State.CLOSE) { + IndicesOptions options = context.options; if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { throw new IndexClosedException(index); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 42240a996c531..682dc85ccd00f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -26,7 +26,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -74,11 +73,7 @@ public class DesiredBalanceComputer { private TimeValue progressLogInterval; private long maxBalanceComputationTimeDuringIndexCreationMillis; - public DesiredBalanceComputer(ClusterSettings clusterSettings, ThreadPool threadPool, ShardsAllocator delegateAllocator) { - this(clusterSettings, delegateAllocator, threadPool::relativeTimeInMillis); - } - - DesiredBalanceComputer(ClusterSettings clusterSettings, ShardsAllocator delegateAllocator, LongSupplier timeSupplierMillis) { + public DesiredBalanceComputer(ClusterSettings clusterSettings, LongSupplier timeSupplierMillis, ShardsAllocator delegateAllocator) { this.delegateAllocator = delegateAllocator; this.timeSupplierMillis = timeSupplierMillis; clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 0cfb3af87f012..5ccb59e29d7dc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -91,7 +91,7 @@ public DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator), reconciler, telemetryProvider ); diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index e6339344b6e5f..f5f923f3657f8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ 
b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -23,7 +23,6 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentDimensions; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -33,6 +32,8 @@ import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.RoutingFields; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; @@ -111,8 +112,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -209,9 +210,9 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + public RoutingFields buildRoutingFields(IndexSettings settings) { IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); - return new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); + return new RoutingPathFields(routing.builder()); } @Override @@ -287,8 +288,8 @@ public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -368,8 +369,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -524,7 +525,7 @@ public String getName() { /** * How {@code time_series_dimension} fields are handled by indices in this mode. */ - public abstract DocumentDimensions buildDocumentDimensions(IndexSettings settings); + public abstract RoutingFields buildRoutingFields(IndexSettings settings); /** * @return Whether timestamps should be validated for being withing the time range of an index. 
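The `IndexMode` hunks above replace `buildDocumentDimensions` with `buildRoutingFields`: the time-series mode builds a `RoutingPathFields` from the index routing, while the other modes share the stateless `RoutingFields.Noop.INSTANCE`. Below is a compact sketch of that per-mode strategy shape; all types here are simplified stand-ins, not the real Elasticsearch classes.

```
// Simplified stand-ins for the pattern above: each mode picks a collector,
// and non-time-series modes share a single no-op singleton.
interface Collector {
    Collector addLong(String field, long value);

    enum Noop implements Collector {
        INSTANCE;

        @Override
        public Collector addLong(String field, long value) {
            return this; // collects nothing, validates nothing
        }
    }
}

final class PathCollector implements Collector {
    private final StringBuilder sb = new StringBuilder();

    @Override
    public Collector addLong(String field, long value) {
        sb.append(field).append('=').append(value).append(';'); // stand-in for hashing/serializing
        return this;
    }

    @Override
    public String toString() {
        return sb.toString();
    }
}

enum Mode {
    STANDARD {
        @Override
        Collector collector() {
            return Collector.Noop.INSTANCE; // no routing fields collected
        }
    },
    TIME_SERIES {
        @Override
        Collector collector() {
            return new PathCollector(); // one mutable collector per document
        }
    };

    abstract Collector collector();
}

final class ModeDemo {
    public static void main(String[] args) {
        System.out.println(Mode.TIME_SERIES.collector().addLong("pod.id", 7L)); // pod.id=7;
        System.out.println(Mode.STANDARD.collector().addLong("pod.id", 7L));    // INSTANCE
    }
}
```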
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 5aaaf7dce83c9..f74d58093a7f5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -499,7 +499,7 @@ private void indexValue(DocumentParserContext context, Boolean value) { } if (fieldType().isDimension()) { - context.getDimensions().addBoolean(fieldType().name(), value).validate(context.indexSettings()); + context.getRoutingFields().addBoolean(fieldType().name(), value); } if (indexed) { context.doc().add(new StringField(fieldType().name(), value ? Values.TRUE : Values.FALSE, Field.Store.NO)); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java deleted file mode 100644 index 8f26d21324d9b..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.IndexSettings; - -import java.net.InetAddress; - -/** - * Collects dimensions from documents. - */ -public interface DocumentDimensions { - - /** - * Build an index's DocumentDimensions using its settings - */ - static DocumentDimensions fromIndexSettings(IndexSettings indexSettings) { - return indexSettings.getMode().buildDocumentDimensions(indexSettings); - } - - /** - * This overloaded method tries to take advantage of the fact that the UTF-8 - * value is already computed in some cases when we want to collect - * dimensions, so we can save re-computing the UTF-8 encoding. 
- */ - DocumentDimensions addString(String fieldName, BytesRef utf8Value); - - default DocumentDimensions addString(String fieldName, String value) { - return addString(fieldName, new BytesRef(value)); - } - - DocumentDimensions addIp(String fieldName, InetAddress value); - - DocumentDimensions addLong(String fieldName, long value); - - DocumentDimensions addUnsignedLong(String fieldName, long value); - - DocumentDimensions addBoolean(String fieldName, boolean value); - - DocumentDimensions validate(IndexSettings settings); - - /** - * Noop implementation that doesn't perform validations on dimension fields - */ - enum Noop implements DocumentDimensions { - - INSTANCE; - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - return this; - } - - @Override - public DocumentDimensions addString(String fieldName, String value) { - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return this; - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - return this; - } - - @Override - public DocumentDimensions validate(IndexSettings settings) { - return this; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index c84df68a637e2..51e4e9f4c1b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -126,7 +126,7 @@ private enum Scope { private final DynamicMapperSize dynamicMappersSize; private final Map dynamicObjectMappers; private final Map> dynamicRuntimeFields; - private final DocumentDimensions dimensions; + private final RoutingFields routingFields; private final ObjectMapper parent; private final ObjectMapper.Dynamic dynamic; private String id; @@ -158,7 +158,7 @@ private DocumentParserContext( String id, Field version, SeqNoFieldMapper.SequenceIDFields seqID, - DocumentDimensions dimensions, + RoutingFields routingFields, ObjectMapper parent, ObjectMapper.Dynamic dynamic, Set fieldsAppliedFromTemplates, @@ -178,7 +178,7 @@ private DocumentParserContext( this.id = id; this.version = version; this.seqID = seqID; - this.dimensions = dimensions; + this.routingFields = routingFields; this.parent = parent; this.dynamic = dynamic; this.fieldsAppliedFromTemplates = fieldsAppliedFromTemplates; @@ -201,7 +201,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.id, in.version, in.seqID, - in.dimensions, + in.routingFields, parent, dynamic, in.fieldsAppliedFromTemplates, @@ -231,7 +231,7 @@ protected DocumentParserContext( null, null, SeqNoFieldMapper.SequenceIDFields.emptySeqID(), - DocumentDimensions.fromIndexSettings(mappingParserContext.getIndexSettings()), + RoutingFields.fromIndexSettings(mappingParserContext.getIndexSettings()), parent, dynamic, new HashSet<>(), @@ -762,8 +762,8 @@ public XContentParser parser() { /** * The collection of dimensions for this document. 
*/ - public DocumentDimensions getDimensions() { - return dimensions; + public RoutingFields getRoutingFields() { + return routingFields; } public abstract ContentPath path(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 2efeeba893c6c..09f44f139d8bc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -549,7 +549,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio private void indexValue(DocumentParserContext context, InetAddress address) { if (dimension) { - context.getDimensions().addIp(fieldType().name(), address).validate(context.indexSettings()); + context.getRoutingFields().addIp(fieldType().name(), address); } if (indexed) { Field field = new InetAddressPoint(fieldType().name(), address); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index ecc708bc94614..32aa422b18bcc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -944,7 +944,7 @@ private void indexValue(DocumentParserContext context, String value) { final BytesRef binaryValue = new BytesRef(value); if (fieldType().isDimension()) { - context.getDimensions().addString(fieldType().name(), binaryValue).validate(context.indexSettings()); + context.getRoutingFields().addString(fieldType().name(), binaryValue); } // If the UTF8 encoding of the field value is bigger than the max length 32766, Lucene fill fail the indexing request and, to diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 55ed1e10428aa..8c21dfea31b9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1991,7 +1991,7 @@ public Number value(XContentParser parser) throws IllegalArgumentException, IOEx */ public void indexValue(DocumentParserContext context, Number numericValue) { if (dimension && numericValue != null) { - context.getDimensions().addLong(fieldType().name(), numericValue.longValue()).validate(context.indexSettings()); + context.getRoutingFields().addLong(fieldType().name(), numericValue.longValue()); } fieldType().type.addFields(context.doc(), fieldType().name(), numericValue, indexed, hasDocValues, stored); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java new file mode 100644 index 0000000000000..4d8d8fdcbd296 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexSettings; + +import java.net.InetAddress; + +/** + * Collects fields contributing to routing from documents. + */ +public interface RoutingFields { + + /** + * Collect routing fields from index settings + */ + static RoutingFields fromIndexSettings(IndexSettings indexSettings) { + return indexSettings.getMode().buildRoutingFields(indexSettings); + } + + /** + * This overloaded method tries to take advantage of the fact that the UTF-8 + * value is already computed in some cases when we want to collect + * routing fields, so we can save re-computing the UTF-8 encoding. + */ + RoutingFields addString(String fieldName, BytesRef utf8Value); + + default RoutingFields addString(String fieldName, String value) { + return addString(fieldName, new BytesRef(value)); + } + + RoutingFields addIp(String fieldName, InetAddress value); + + RoutingFields addLong(String fieldName, long value); + + RoutingFields addUnsignedLong(String fieldName, long value); + + RoutingFields addBoolean(String fieldName, boolean value); + + /** + * Noop implementation that doesn't perform validations on routing fields + */ + enum Noop implements RoutingFields { + + INSTANCE; + + @Override + public RoutingFields addString(String fieldName, BytesRef utf8Value) { + return this; + } + + @Override + public RoutingFields addString(String fieldName, String value) { + return this; + } + + @Override + public RoutingFields addIp(String fieldName, InetAddress value) { + return this; + } + + @Override + public RoutingFields addLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addUnsignedLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addBoolean(String fieldName, boolean value) { + return this; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java new file mode 100644 index 0000000000000..73baca1bf3fdb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java @@ -0,0 +1,269 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; +import org.elasticsearch.cluster.routing.IndexRouting; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.hash.Murmur3Hasher; +import org.elasticsearch.common.hash.MurmurHash3; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.search.DocValueFormat; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * Implementation of routing fields, using field matching based on the routing path content. + */ +public final class RoutingPathFields implements RoutingFields { + + private static final int SEED = 0; + + private static final int MAX_ROUTING_FIELDS = 512; + + private static final int MAX_HASH_LEN_BYTES = 2; + static { + assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], hashLen(MAX_ROUTING_FIELDS), 0); + } + + /** + * A map of the serialized values of routing fields that will be used + * for generating the _tsid field. The map will be used by {@link RoutingPathFields} + * to build the _tsid field for the document. + */ + private final SortedMap<BytesRef, List<BytesReference>> routingValues = new TreeMap<>(); + + /** + * Builds the routing. Used for building {@code _id}. If null then skipped. + */ + @Nullable + private final IndexRouting.ExtractFromSource.Builder routingBuilder; + + public RoutingPathFields(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { + this.routingBuilder = routingBuilder; + } + + SortedMap<BytesRef, List<BytesReference>> routingValues() { + return Collections.unmodifiableSortedMap(routingValues); + } + + IndexRouting.ExtractFromSource.Builder routingBuilder() { + return routingBuilder; + } + + /** + * Here we build the hash of the routing values using a similarity function so that we have a result + * with the following pattern: + * + * hash128(concatenate(routing field names)) + + * foreach(routing field value, limit = MAX_ROUTING_FIELDS) { hash32(routing field value) } + + * hash128(concatenate(routing field values)) + * + * The idea is to be able to place 'similar' values close to each other. 
+ */ + public BytesReference buildHash() { + Murmur3Hasher hasher = new Murmur3Hasher(SEED); + + // NOTE: hash all routing field names + int numberOfFields = Math.min(MAX_ROUTING_FIELDS, routingValues.size()); + int len = hashLen(numberOfFields); + // either one or two bytes are occupied by the vint since we're bounded by #MAX_ROUTING_FIELDS + byte[] hash = new byte[MAX_HASH_LEN_BYTES + len]; + int index = StreamOutput.putVInt(hash, len, 0); + + hasher.reset(); + for (final BytesRef name : routingValues.keySet()) { + hasher.update(name.bytes); + } + index = writeHash128(hasher.digestHash(), hash, index); + + // NOTE: concatenate all routing field value hashes up to a certain number of fields + int startIndex = index; + for (final List values : routingValues.values()) { + if ((index - startIndex) >= 4 * numberOfFields) { + break; + } + assert values.isEmpty() == false : "routing values are empty"; + final BytesRef routingValue = values.get(0).toBytesRef(); + ByteUtils.writeIntLE( + StringHelper.murmurhash3_x86_32(routingValue.bytes, routingValue.offset, routingValue.length, SEED), + hash, + index + ); + index += 4; + } + + // NOTE: hash all routing field allValues + hasher.reset(); + for (final List values : routingValues.values()) { + for (BytesReference v : values) { + hasher.update(v.toBytesRef().bytes); + } + } + index = writeHash128(hasher.digestHash(), hash, index); + + return new BytesArray(hash, 0, index); + } + + private static int hashLen(int numberOfFields) { + return 16 + 16 + 4 * numberOfFields; + } + + private static int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int index) { + ByteUtils.writeLongLE(hash128.h1, buffer, index); + index += 8; + ByteUtils.writeLongLE(hash128.h2, buffer, index); + index += 8; + return index; + } + + @Override + public RoutingFields addString(String fieldName, BytesRef utf8Value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 's'); + /* + * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish + * so it's easier for folks to reason about the space taken up. Mostly + * it'll be smaller too. 
+ */ + out.writeBytesRef(utf8Value); + add(fieldName, out.bytes()); + + if (routingBuilder != null) { + routingBuilder.addMatching(fieldName, utf8Value); + } + } catch (IOException e) { + throw new IllegalArgumentException("Routing field cannot be serialized.", e); + } + return this; + } + + @Override + public RoutingFields addIp(String fieldName, InetAddress value) { + return addString(fieldName, NetworkAddress.format(value)); + } + + @Override + public RoutingFields addLong(String fieldName, long value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 'l'); + out.writeLong(value); + add(fieldName, out.bytes()); + } catch (IOException e) { + throw new IllegalArgumentException("Routing field cannot be serialized.", e); + } + return this; + } + + @Override + public RoutingFields addUnsignedLong(String fieldName, long value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value); + if (ul instanceof Long l) { + out.write((byte) 'l'); + out.writeLong(l); + } else { + out.write((byte) 'u'); + out.writeLong(value); + } + add(fieldName, out.bytes()); + return this; + } catch (IOException e) { + throw new IllegalArgumentException("Routing field cannot be serialized.", e); + } + } + + @Override + public RoutingFields addBoolean(String fieldName, boolean value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 'b'); + out.write(value ? 't' : 'f'); + add(fieldName, out.bytes()); + } catch (IOException e) { + throw new IllegalArgumentException("Routing field cannot be serialized.", e); + } + return this; + } + + private void add(String fieldName, BytesReference encoded) throws IOException { + BytesRef name = new BytesRef(fieldName); + List values = routingValues.get(name); + if (values == null) { + // optimize for the common case where routing fields are not multi-valued + routingValues.put(name, List.of(encoded)); + } else { + if (values.size() == 1) { + // converts the immutable list that's optimized for the common case of having only one value to a mutable list + BytesReference previousValue = values.get(0); + values = new ArrayList<>(4); + values.add(previousValue); + routingValues.put(name, values); + } + values.add(encoded); + } + } + + public static Map decodeAsMap(BytesRef bytesRef) { + try (StreamInput in = new BytesArray(bytesRef).streamInput()) { + int size = in.readVInt(); + Map result = new LinkedHashMap<>(size); + + for (int i = 0; i < size; i++) { + String name = null; + try { + name = in.readSlicedBytesReference().utf8ToString(); + } catch (AssertionError ae) { + throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae); + } + + int type = in.read(); + switch (type) { + case (byte) 's' -> { + // parse a string + try { + result.put(name, in.readSlicedBytesReference().utf8ToString()); + } catch (AssertionError ae) { + throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae); + } + } + case (byte) 'l' -> // parse a long + result.put(name, in.readLong()); + case (byte) 'u' -> { // parse an unsigned_long + Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong()); + result.put(name, ul); + } + case (byte) 'd' -> // parse a double + result.put(name, in.readDouble()); + case (byte) 'b' -> // parse a boolean + result.put(name, in.read() == 't'); + default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]"); + } + } + return result; + } catch 
(IOException | IllegalArgumentException e) { + throw new IllegalArgumentException("Routing field cannot be deserialized:" + e.getMessage(), e); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index a6b2ad265decf..8af3c3e6ec270 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -12,21 +12,11 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.hash.Murmur3Hasher; -import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.fielddata.FieldData; @@ -40,15 +30,10 @@ import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.io.IOException; -import java.net.InetAddress; import java.time.ZoneId; -import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import java.util.SortedMap; -import java.util.TreeMap; /** * Mapper for {@code _tsid} field included generated when the index is @@ -136,15 +121,24 @@ private TimeSeriesIdFieldMapper() { public void postParse(DocumentParserContext context) throws IOException { assert fieldType().isIndexed() == false; - final TimeSeriesIdBuilder timeSeriesIdBuilder = (TimeSeriesIdBuilder) context.getDimensions(); - final BytesRef timeSeriesId = getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING) - ? timeSeriesIdBuilder.buildLegacyTsid().toBytesRef() - : timeSeriesIdBuilder.buildTsidHash().toBytesRef(); + final RoutingPathFields routingPathFields = (RoutingPathFields) context.getRoutingFields(); + final BytesRef timeSeriesId; + if (getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING)) { + long limit = context.indexSettings().getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING); + int size = routingPathFields.routingValues().size(); + if (size > limit) { + throw new MapperException("Too many dimension fields [" + size + "], max [" + limit + "] dimension fields allowed"); + } + timeSeriesId = buildLegacyTsid(routingPathFields).toBytesRef(); + } else { + timeSeriesId = routingPathFields.buildHash().toBytesRef(); + } context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId)); + TsidExtractingIdFieldMapper.createField( context, getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID) - ? timeSeriesIdBuilder.routingBuilder + ? 
routingPathFields.routingBuilder() : null, timeSeriesId ); @@ -170,231 +164,6 @@ public static Object encodeTsid(StreamInput in) { } } - public static class TimeSeriesIdBuilder implements DocumentDimensions { - - private static final int SEED = 0; - - public static final int MAX_DIMENSIONS = 512; - - private final Murmur3Hasher tsidHasher = new Murmur3Hasher(0); - - /** - * A map of the serialized values of dimension fields that will be used - * for generating the _tsid field. The map will be used by {@link TimeSeriesIdFieldMapper} - * to build the _tsid field for the document. - */ - private final SortedMap> dimensions = new TreeMap<>(); - /** - * Builds the routing. Used for building {@code _id}. If null then skipped. - */ - @Nullable - private final IndexRouting.ExtractFromSource.Builder routingBuilder; - - public TimeSeriesIdBuilder(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { - this.routingBuilder = routingBuilder; - } - - public BytesReference buildLegacyTsid() throws IOException { - if (dimensions.isEmpty()) { - throw new IllegalArgumentException("Dimension fields are missing."); - } - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeVInt(dimensions.size()); - for (Map.Entry> entry : dimensions.entrySet()) { - out.writeBytesRef(entry.getKey()); - List value = entry.getValue(); - if (value.size() > 1) { - // multi-value dimensions are only supported for newer indices that use buildTsidHash - throw new IllegalArgumentException( - "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field." - ); - } - assert value.isEmpty() == false : "dimension value is empty"; - value.get(0).writeTo(out); - } - return out.bytes(); - } - } - - private static final int MAX_HASH_LEN_BYTES = 2; - - static { - assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], tsidHashLen(MAX_DIMENSIONS), 0); - } - - /** - * Here we build the hash of the tsid using a similarity function so that we have a result - * with the following pattern: - * - * hash128(catenate(dimension field names)) + - * foreach(dimension field value, limit = MAX_DIMENSIONS) { hash32(dimension field value) } + - * hash128(catenate(dimension field values)) - * - * The idea is to be able to place 'similar' time series close to each other. Two time series - * are considered 'similar' if they share the same dimensions (names and values). 
- */ - public BytesReference buildTsidHash() { - // NOTE: hash all dimension field names - int numberOfDimensions = Math.min(MAX_DIMENSIONS, dimensions.size()); - int len = tsidHashLen(numberOfDimensions); - // either one or two bytes are occupied by the vint since we're bounded by #MAX_DIMENSIONS - byte[] tsidHash = new byte[MAX_HASH_LEN_BYTES + len]; - int tsidHashIndex = StreamOutput.putVInt(tsidHash, len, 0); - - tsidHasher.reset(); - for (final BytesRef name : dimensions.keySet()) { - tsidHasher.update(name.bytes); - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - // NOTE: concatenate all dimension value hashes up to a certain number of dimensions - int tsidHashStartIndex = tsidHashIndex; - for (final List values : dimensions.values()) { - if ((tsidHashIndex - tsidHashStartIndex) >= 4 * numberOfDimensions) { - break; - } - assert values.isEmpty() == false : "dimension values are empty"; - final BytesRef dimensionValueBytesRef = values.get(0).toBytesRef(); - ByteUtils.writeIntLE( - StringHelper.murmurhash3_x86_32( - dimensionValueBytesRef.bytes, - dimensionValueBytesRef.offset, - dimensionValueBytesRef.length, - SEED - ), - tsidHash, - tsidHashIndex - ); - tsidHashIndex += 4; - } - - // NOTE: hash all dimension field allValues - tsidHasher.reset(); - for (final List values : dimensions.values()) { - for (BytesReference v : values) { - tsidHasher.update(v.toBytesRef().bytes); - } - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - return new BytesArray(tsidHash, 0, tsidHashIndex); - } - - private static int tsidHashLen(int numberOfDimensions) { - return 16 + 16 + 4 * numberOfDimensions; - } - - private int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int tsidHashIndex) { - ByteUtils.writeLongLE(hash128.h1, buffer, tsidHashIndex); - tsidHashIndex += 8; - ByteUtils.writeLongLE(hash128.h2, buffer, tsidHashIndex); - tsidHashIndex += 8; - return tsidHashIndex; - } - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 's'); - /* - * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish - * so it's easier for folks to reason about the space taken up. Mostly - * it'll be smaller too. 
- */ - out.writeBytesRef(utf8Value); - add(fieldName, out.bytes()); - - if (routingBuilder != null) { - routingBuilder.addMatching(fieldName, utf8Value); - } - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return addString(fieldName, NetworkAddress.format(value)); - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'l'); - out.writeLong(value); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value); - if (ul instanceof Long l) { - out.write((byte) 'l'); - out.writeLong(l); - } else { - out.write((byte) 'u'); - out.writeLong(value); - } - add(fieldName, out.bytes()); - return this; - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'b'); - out.write(value ? 't' : 'f'); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions validate(final IndexSettings settings) { - if (settings.getIndexVersionCreated().before(IndexVersions.TIME_SERIES_ID_HASHING) - && dimensions.size() > settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING)) { - throw new MapperException( - "Too many dimension fields [" - + dimensions.size() - + "], max [" - + settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING) - + "] dimension fields allowed" - ); - } - return this; - } - - private void add(String fieldName, BytesReference encoded) throws IOException { - BytesRef name = new BytesRef(fieldName); - List values = dimensions.get(name); - if (values == null) { - // optimize for the common case where dimensions are not multi-valued - dimensions.put(name, List.of(encoded)); - } else { - if (values.size() == 1) { - // converts the immutable list that's optimized for the common case of having only one value to a mutable list - BytesReference previousValue = values.get(0); - values = new ArrayList<>(4); - values.add(previousValue); - dimensions.put(name, values); - } - values.add(encoded); - } - } - } - public static Object encodeTsid(final BytesRef bytesRef) { return base64Encode(bytesRef); } @@ -405,53 +174,27 @@ private static String base64Encode(final BytesRef bytesRef) { return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(bytes); } - public static Map decodeTsidAsMap(BytesRef bytesRef) { - try (StreamInput input = new BytesArray(bytesRef).streamInput()) { - return decodeTsidAsMap(input); - } catch (IOException ex) { - throw new IllegalArgumentException("Dimension field cannot be deserialized.", ex); - } - } - - public static Map decodeTsidAsMap(StreamInput in) { - try { - int size = in.readVInt(); - Map result = new LinkedHashMap<>(size); - - for (int i = 0; i < size; i++) { - String name = null; - 
try { - name = in.readSlicedBytesReference().utf8ToString(); - } catch (AssertionError ae) { - throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae); - } - - int type = in.read(); - switch (type) { - case (byte) 's' -> { - // parse a string - try { - result.put(name, in.readSlicedBytesReference().utf8ToString()); - } catch (AssertionError ae) { - throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae); - } - } - case (byte) 'l' -> // parse a long - result.put(name, in.readLong()); - case (byte) 'u' -> { // parse an unsigned_long - Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong()); - result.put(name, ul); - } - case (byte) 'd' -> // parse a double - result.put(name, in.readDouble()); - case (byte) 'b' -> // parse a boolean - result.put(name, in.read() == 't'); - default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]"); + public static BytesReference buildLegacyTsid(RoutingPathFields routingPathFields) throws IOException { + SortedMap<BytesRef, List<BytesReference>> routingValues = routingPathFields.routingValues(); + if (routingValues.isEmpty()) { + throw new IllegalArgumentException("Dimension fields are missing."); + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(routingValues.size()); + for (var entry : routingValues.entrySet()) { + out.writeBytesRef(entry.getKey()); + List<BytesReference> value = entry.getValue(); + if (value.size() > 1) { + // multi-value dimensions are only supported for newer indices that use buildTsidHash + throw new IllegalArgumentException( + "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field." + ); } + assert value.isEmpty() == false : "dimension value is empty"; + value.get(0).writeTo(out); } - return result; - } catch (IOException | IllegalArgumentException e) { - throw new IllegalArgumentException("Error formatting " + NAME + ": " + e.getMessage(), e); + return out.bytes(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java index 351e3149da3df..93ef04ddd159a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java @@ -184,10 +184,7 @@ private void addField(Context context, ContentPath path, String currentName, Str final String keyedFieldName = FlattenedFieldParser.extractKey(bytesKeyedValue).utf8ToString(); if (fieldType.isDimension() && fieldType.dimensions().contains(keyedFieldName)) { final BytesRef keyedFieldValue = FlattenedFieldParser.extractValue(bytesKeyedValue); - context.documentParserContext() - .getDimensions() - .addString(rootFieldFullPath + "." + keyedFieldName, keyedFieldValue) - .validate(context.documentParserContext().indexSettings()); + context.documentParserContext().getRoutingFields().addString(rootFieldFullPath + "." 
+ keyedFieldName, keyedFieldValue); } } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 5354b1097326b..b424b417da82b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1077,7 +1077,8 @@ private void construct( searchTransportService, indexingLimits, searchModule.getValuesSourceRegistry().getUsageService(), - repositoriesService + repositoriesService, + compatibilityVersions ); final TimeValue metricsInterval = settings.getAsTime("telemetry.agent.metrics_interval", TimeValue.timeValueSeconds(10)); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 9310849ba8111..7c71487ed68ca 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -10,7 +10,6 @@ package org.elasticsearch.node; import org.elasticsearch.Build; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -19,6 +18,7 @@ import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeValue; @@ -65,6 +65,7 @@ public class NodeService implements Closeable { private final Coordinator coordinator; private final RepositoriesService repositoriesService; private final Map componentVersions; + private final CompatibilityVersions compatibilityVersions; NodeService( Settings settings, @@ -84,7 +85,8 @@ public class NodeService implements Closeable { SearchTransportService searchTransportService, IndexingPressure indexingPressure, AggregationUsageService aggregationUsageService, - RepositoriesService repositoriesService + RepositoriesService repositoriesService, + CompatibilityVersions compatibilityVersions ) { this.settings = settings; this.threadPool = threadPool; @@ -104,6 +106,7 @@ public class NodeService implements Closeable { this.aggregationUsageService = aggregationUsageService; this.repositoriesService = repositoriesService; this.componentVersions = findComponentVersions(pluginService); + this.compatibilityVersions = compatibilityVersions; clusterService.addStateApplier(ingestService); } @@ -124,7 +127,7 @@ public NodeInfo info( return new NodeInfo( // TODO: revert to Build.current().version() when Kibana is updated Version.CURRENT.toString(), - TransportVersion.current(), + compatibilityVersions, IndexVersion.current(), componentVersions, Build.current(), diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index bdefee988248f..51f52326907eb 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -22,8 +22,8 @@ import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.mapper.DateFieldMapper; 
+import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import java.io.IOException; @@ -729,7 +729,7 @@ public Object format(BytesRef value) { try { // NOTE: if the tsid is a map of dimension key/value pairs (as it was before introducing // tsid hashing) we just decode the map and return it. - return TimeSeriesIdFieldMapper.decodeTsidAsMap(value); + return RoutingPathFields.decodeAsMap(value); } catch (Exception e) { // NOTE: otherwise the _tsid field is just a hash and we can't decode it return TimeSeriesIdFieldMapper.encodeTsid(value); @@ -760,20 +760,20 @@ private BytesRef parseBytesRefMap(Object value) { } Map m = (Map) value; - TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (Map.Entry entry : m.entrySet()) { String f = entry.getKey().toString(); Object v = entry.getValue(); if (v instanceof String s) { - builder.addString(f, s); + routingPathFields.addString(f, s); } else if (v instanceof Long l) { - builder.addLong(f, l); + routingPathFields.addLong(f, l); } else if (v instanceof Integer i) { - builder.addLong(f, i.longValue()); + routingPathFields.addLong(f, i.longValue()); } else if (v instanceof BigInteger ul) { long ll = UNSIGNED_LONG_SHIFTED.parseLong(ul.toString(), false, () -> 0L); - builder.addUnsignedLong(f, ll); + routingPathFields.addUnsignedLong(f, ll); } else { throw new IllegalArgumentException("Unexpected value in tsid object [" + v + "]"); } @@ -781,7 +781,7 @@ private BytesRef parseBytesRefMap(Object value) { try { // NOTE: we can decode the tsid only if it is not hashed (represented as a map) - return builder.buildLegacyTsid().toBytesRef(); + return TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } catch (IOException e) { throw new IllegalArgumentException(e); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index 17fab91d97cad..bb4aa9beeb42e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -101,7 +101,7 @@ public void testDeleteDesiredBalance() throws Exception { var clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings); var delegate = new BalancedShardsAllocator(); - var computer = new DesiredBalanceComputer(clusterSettings, threadPool, delegate) { + var computer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegate) { final AtomicReference lastComputationInput = new AtomicReference<>(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java index 5fa138abca809..9d01f411d35aa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Build; import 
org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -40,7 +41,7 @@ public class NodeInfoTests extends ESTestCase { public void testGetInfo() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index 3eb0ff9fae674..6a9d6973a0047 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -78,7 +79,7 @@ public void testDoExecuteForRemoteServerNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, @@ -156,7 +157,7 @@ public void testDoExecuteForRemoteNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 44ceb94b392e5..627c57e07a1f3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -327,7 +328,7 @@ private static NodeInfo createNodeInfo(String nodeId, String transportType, Stri } return new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 331f754d437a7..0bc5c69d8ad4b 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -103,7 +104,7 @@ public void setup() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 56a687646b364..51401acabb0ac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -53,7 +53,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.test.MockLog; -import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -85,8 +84,6 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DesiredBalanceComputerTests extends ESAllocationTestCase { @@ -1205,43 +1202,43 @@ public void testShouldLogComputationIteration() { } private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) { - - var mockThreadPool = mock(ThreadPool.class); var currentTime = new AtomicLong(0L); - when(mockThreadPool.relativeTimeInMillis()).thenAnswer(invocation -> currentTime.addAndGet(eachIterationDuration)); - // Some runs of this test try to simulate a long desired balance computation. Setting a high value on the following setting // prevents interrupting a long computation. 
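
The refactor in this test replaces the mocked ThreadPool with a plain time supplier (presumably a LongSupplier, since threadPool::relativeTimeInMillis and () -> 0L both fit that shape). A self-contained sketch of the fake-clock pattern the test now uses, with no Mockito involved:

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

class FakeClockSketch {
    public static void main(String[] args) {
        AtomicLong currentTime = new AtomicLong(0L);
        long eachIterationDuration = 1_000L; // pretend each computation iteration takes 1s
        // Every read of the clock advances simulated time by one iteration:
        LongSupplier timeSource = () -> currentTime.addAndGet(eachIterationDuration);
        System.out.println(timeSource.getAsLong()); // 1000
        System.out.println(timeSource.getAsLong()); // 2000
    }
}
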
var clusterSettings = createBuiltInClusterSettings( Settings.builder().put(DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING.getKey(), "2m").build() ); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, mockThreadPool, new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final var shardRouting = unassignedIterator.next(); - if (shardRouting.primary()) { - unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); - } else { - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); + var desiredBalanceComputer = new DesiredBalanceComputer( + clusterSettings, + () -> currentTime.addAndGet(eachIterationDuration), + new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final var shardRouting = unassignedIterator.next(); + if (shardRouting.primary()) { + unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); + } else { + unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); + } } - } - // move shard on each iteration - for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); - } - for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + // move shard on each iteration + for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); + } + for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + } } - } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); + } } - }); + ); assertThatLogger(() -> { var iteration = new AtomicInteger(0); @@ -1349,7 +1346,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), allocator); + return new DesiredBalanceComputer(createBuiltInClusterSettings(), () -> 0L, allocator); } private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 27c430131ff07..2cb3204787ce1 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -396,7 +396,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, shardsAllocator, time::get) { + new DesiredBalanceComputer(clusterSettings, time::get, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -522,7 +522,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -625,7 +625,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -712,7 +712,7 @@ public void testResetDesiredBalance() { var delegateAllocator = createShardsAllocator(); var clusterSettings = createBuiltInClusterSettings(); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) { + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -780,7 +780,11 @@ public void testResetDesiredBalanceOnNoLongerMaster() { var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); + var desiredBalanceComputer = new DesiredBalanceComputer( + createBuiltInClusterSettings(), + threadPool::relativeTimeInMillis, + delegateAllocator + ); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, @@ -829,7 +833,11 @@ public void testResetDesiredBalanceOnNodeShutdown() { final var resetCalled = new AtomicBoolean(); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); + var desiredBalanceComputer = new DesiredBalanceComputer( + createBuiltInClusterSettings(), + threadPool::relativeTimeInMillis, + delegateAllocator + ); var desiredBalanceAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index f9d3b7fcc920b..9eec8309bbb83 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -27,6 +27,7 @@ import 
org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -34,11 +35,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; +import static java.util.Map.entry; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.same; @@ -77,7 +81,7 @@ private static Map versions(T... versions) { return tvs; } - private static NodesInfoResponse getResponse(Map responseData) { + private static NodesInfoResponse getResponse(Map responseData) { return new NodesInfoResponse( ClusterName.DEFAULT, responseData.entrySet() @@ -207,10 +211,19 @@ public void testVersionsAreFixed() { argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), action.capture() ); - action.getValue().onResponse(getResponse(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + action.getValue() + .onResponse( + getResponse( + Map.ofEntries( + entry("node1", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())), + entry("node2", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())) + ) + ) + ); verify(taskQueue).submitTask(anyString(), task.capture(), any()); - assertThat(task.getValue().results(), equalTo(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + assertThat(task.getValue().results().keySet(), equalTo(Set.of("node1", "node2"))); + assertThat(task.getValue().results().values(), everyItem(equalTo(NEXT_TRANSPORT_VERSION))); } public void testConcurrentChangesDoNotOverlap() { @@ -259,12 +272,17 @@ public void testFailedRequestsAreRetried() { Scheduler scheduler = mock(Scheduler.class); Executor executor = mock(Executor.class); + var compatibilityVersions = new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) + ); ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) + .nodes(node(Version.CURRENT, Version.CURRENT, Version.CURRENT)) .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) + Map.ofEntries( + entry("node0", compatibilityVersions), + entry("node1", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())), + entry("node2", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())) ) ) .build(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index b07ec8e7cb683..083efccceec16 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -27,11 +27,7 @@ import org.apache.lucene.tests.analysis.MockAnalyzer; import 
org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -72,8 +68,6 @@ public void testSynthesizeIdSimple() throws Exception { } public void testSynthesizeIdMultipleSegments() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -144,8 +138,6 @@ public void testSynthesizeIdMultipleSegments() throws Exception { } public void testSynthesizeIdRandom() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -153,7 +145,6 @@ public void testSynthesizeIdRandom() throws Exception { List randomDocs = new ArrayList<>(); int numberOfTimeSeries = randomIntBetween(8, 64); for (int i = 0; i < numberOfTimeSeries; i++) { - long routingId = 0; int numberOfDimensions = randomIntBetween(1, 6); List dimensions = new ArrayList<>(numberOfDimensions); for (int j = 1; j <= numberOfDimensions; j++) { @@ -165,7 +156,6 @@ public void testSynthesizeIdRandom() throws Exception { value = randomAlphaOfLength(4); } dimensions.add(new Dimension(fieldName, value)); - routingId = value.hashCode(); } int numberOfSamples = randomIntBetween(1, 16); for (int j = 0; j < numberOfSamples; j++) { @@ -225,21 +215,21 @@ private void prepareIndexReader( } private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IOException { - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { - builder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); fields.add(new SortedNumericDocValuesField(dimension.field, ((Number) dimension.value).longValue())); } else { - builder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); fields.add(new SortedSetDocValuesField(dimension.field, new BytesRef(dimension.value.toString()))); } } - BytesRef tsid = builder.buildTsidHash().toBytesRef(); + BytesRef tsid = routingFields.buildHash().toBytesRef(); fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, tsid)); fields.add( new SortedDocValuesField( @@ -251,25 +241,15 @@ private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IO } private static String expectedId(Doc doc, int routingHash) throws IOException { - var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof 
Number n) { - timeSeriesIdBuilder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); } else { - timeSeriesIdBuilder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); } } - return TsidExtractingIdFieldMapper.createId(routingHash, timeSeriesIdBuilder.buildTsidHash().toBytesRef(), doc.timestamp); - } - - private static IndexRouting.ExtractFromSource createRouting(List routingPaths) { - var settings = indexSettings(IndexVersion.current(), 2, 1).put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-01T00:00:00.000Z") - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2001-01-01T00:00:00.000Z") - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPaths) - .build(); - var indexMetadata = IndexMetadata.builder("index").settings(settings).build(); - return (IndexRouting.ExtractFromSource) IndexRouting.fromIndexMetadata(indexMetadata); + return TsidExtractingIdFieldMapper.createId(routingHash, routingFields.buildHash().toBytesRef(), doc.timestamp); } record Doc(long timestamp, List dimensions) {} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java new file mode 100644 index 0000000000000..2c2c0d160c904 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.IndexRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; + +public class RoutingPathFieldsTests extends ESTestCase { + + public void testWithBuilder() throws Exception { + IndexSettings settings = new IndexSettings( + IndexMetadata.builder("test") + .settings( + indexSettings(IndexVersion.current(), 1, 1).put( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "path.*").build() + ) + ) + .build(), + Settings.EMPTY + ); + IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); + + var routingPathFields = new RoutingPathFields(routing.builder()); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } + + public void testWithoutBuilder() throws Exception { + var routingPathFields = new RoutingPathFields(null); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } +} diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index fd839999edf21..33801dfb98417 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ 
-14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -241,7 +242,7 @@ private static NodeInfo createNodeInfo() { } return new NodeInfo( randomAlphaOfLengthBetween(6, 32), - TransportVersionUtils.randomVersion(random()), + new CompatibilityVersions(TransportVersionUtils.randomVersion(random()), Map.of()), IndexVersionUtils.randomVersion(random()), componentVersions, build, diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java index 766fefbeddb0f..0994f9bf2303c 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Table; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.PluginDescriptor; @@ -66,7 +67,7 @@ private Table buildTable(List pluginDescriptor) { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 5371893993318..e81066a731d2e 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.test.ESTestCase; @@ -379,11 +379,11 @@ public void testParseZone() { } public void testParseTsid() throws IOException { - TimeSeriesIdBuilder timeSeriesIdBuilder = new TimeSeriesIdBuilder(null); - timeSeriesIdBuilder.addString("string", randomAlphaOfLength(10)); - timeSeriesIdBuilder.addLong("long", randomLong()); - timeSeriesIdBuilder.addUnsignedLong("ulong", randomLong()); - BytesRef expected = timeSeriesIdBuilder.buildTsidHash().toBytesRef(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("string", randomAlphaOfLength(10)); + routingFields.addLong("long", randomLong()); + routingFields.addUnsignedLong("ulong", randomLong()); + BytesRef expected = routingFields.buildHash().toBytesRef(); byte[] expectedBytes = new byte[expected.length]; System.arraycopy(expected.bytes, 0, expectedBytes, 0, expected.length); BytesRef actual = DocValueFormat.TIME_SERIES_ID.parseBytesRef(expected); diff --git 
a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index 3c7a18de536bc..e684092099948 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -176,9 +176,9 @@ private List docs(long startTimestamp, String dim, long... values) thr } private static BytesReference tsid(String dim) throws IOException { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder idBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); - idBuilder.addString("dim", dim); - return idBuilder.buildTsidHash(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("dim", dim); + return routingFields.buildHash(); } private Document doc(long timestamp, BytesReference tsid, long counterValue, String dim) { diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 9658db911f6df..85cd415102124 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -452,7 +453,7 @@ private static org.elasticsearch.action.admin.cluster.node.info.NodeInfo infoFor OsInfo osInfo = new OsInfo(randomLong(), processors, Processors.of((double) processors), null, null, null, null); return new org.elasticsearch.action.admin.cluster.node.info.NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 1750ccbb8c0ce..193a82436f26a 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -89,6 +89,5 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") 
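
Several tests above swap TimeSeriesIdFieldMapper.TimeSeriesIdBuilder for RoutingPathFields. A hedged sketch of that replacement pattern, using only the calls visible in this diff (addString/addLong, buildHash, and BytesReference.toBytesRef); the dimension names are made up for illustration:

import org.apache.lucene.util.BytesRef;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.mapper.RoutingPathFields;

class TsidHashSketch {
    // Builds the _tsid hash for a document with two hypothetical dimensions.
    static BytesRef tsidFor(String host, long cpu) {
        var routingFields = new RoutingPathFields(null); // null: no routing builder, as in the tests
        routingFields.addString("host", host);
        routingFields.addLong("cpu", cpu);
        BytesReference hash = routingFields.buildHash();
        return hash.toBytesRef();
    }
}
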
- task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed") }) diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java new file mode 100644 index 0000000000000..ee029d01427aa --- /dev/null +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.List; + +import static java.util.Map.entry; +import static java.util.Map.ofEntries; + +public class DataStreamDeprecationChecks { + static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clusterState) { + List backingIndices = dataStream.getIndices(); + boolean hasOldIndices = backingIndices.stream() + .anyMatch(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)); + if (hasOldIndices) { + long totalIndices = backingIndices.size(); + List oldIndices = backingIndices.stream() + .filter(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)) + .toList(); + long totalOldIndices = oldIndices.size(); + long totalOldSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isSearchableSnapshot()) + .count(); + long totalOldPartiallyMountedSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isPartialSearchableSnapshot()) + .count(); + long totalOldFullyMountedSearchableSnapshots = totalOldSearchableSnapshots - totalOldPartiallyMountedSearchableSnapshots; + return new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + ofEntries( + entry( + "backing_indices", + ofEntries( + entry("count", totalIndices), + entry( + "need_upgrading", + ofEntries( + entry("count", totalOldIndices), + entry( + "searchable_snapshots", + ofEntries( + entry("count", totalOldSearchableSnapshots), + entry("fully_mounted", ofEntries(entry("count", totalOldFullyMountedSearchableSnapshots))), + entry( + "partially_mounted", + ofEntries(entry("count", totalOldPartiallyMountedSearchableSnapshots)) + ) + ) + ) + ) + ) + ) + ) + ) + ); + } + return null; + } +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 2f875cc1a3fa9..c80f26cda7b36 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ 
b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +17,7 @@ import java.util.List; import java.util.Objects; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -97,6 +99,10 @@ private DeprecationChecks() {} IndexDeprecationChecks::deprecatedCamelCasePattern ); + static List> DATA_STREAM_CHECKS = List.of( + DataStreamDeprecationChecks::oldIndicesCheck + ); + /** * helper utility function to reduce repeat of running a specific {@link List} of checks. * diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cb9efd526fb29..cd26e23394e81 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -42,6 +43,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -144,10 +146,11 @@ private static Map> getMergedIssuesToNodesMap( } public static class Response extends ActionResponse implements ToXContentObject { - static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings"); + static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings", "data_streams"); private final List clusterSettingsIssues; private final List nodeSettingsIssues; private final Map> indexSettingsIssues; + private final Map> dataStreamIssues; private final Map> pluginSettingsIssues; public Response(StreamInput in) throws IOException { @@ -155,6 +158,11 @@ public Response(StreamInput in) throws IOException { clusterSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); nodeSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); indexSettingsIssues = in.readMapOfLists(DeprecationIssue::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + dataStreamIssues = in.readMapOfLists(DeprecationIssue::new); + } else { + dataStreamIssues = Map.of(); + } if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) { List mlIssues = in.readCollectionAsList(DeprecationIssue::new); pluginSettingsIssues = new HashMap<>(); @@ -168,11 +176,13 @@ public Response( List clusterSettingsIssues, List nodeSettingsIssues, Map> indexSettingsIssues, + Map> dataStreamIssues, Map> pluginSettingsIssues ) { this.clusterSettingsIssues = clusterSettingsIssues; 
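
The wire handling above follows the usual transport-version gate: the new data_streams field is only (de)serialized when the stream is new enough, and readers default it otherwise. A sketch of the read side in isolation, reusing only the calls shown in this diff:

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;

class BwcReadSketch {
    // Mirrors the Response(StreamInput) constructor: read the new map from
    // new-enough peers, fall back to an empty map when talking to older nodes.
    static Map<String, List<DeprecationIssue>> readDataStreamIssues(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) {
            return in.readMapOfLists(DeprecationIssue::new);
        }
        return Map.of();
    }
}
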
this.nodeSettingsIssues = nodeSettingsIssues; this.indexSettingsIssues = indexSettingsIssues; + this.dataStreamIssues = dataStreamIssues; Set intersection = Sets.intersection(RESERVED_NAMES, pluginSettingsIssues.keySet()); if (intersection.isEmpty() == false) { throw new ElasticsearchStatusException( @@ -205,6 +215,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(clusterSettingsIssues); out.writeCollection(nodeSettingsIssues); out.writeMap(indexSettingsIssues, StreamOutput::writeCollection); + if (out.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + out.writeMap(dataStreamIssues, StreamOutput::writeCollection); + } if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) { out.writeCollection(pluginSettingsIssues.getOrDefault("ml_settings", Collections.emptyList())); } else { @@ -219,6 +232,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .array("node_settings", nodeSettingsIssues.toArray()) .field("index_settings") .map(indexSettingsIssues) + .field("data_streams") + .map(dataStreamIssues) .mapContents(pluginSettingsIssues) .endObject(); } @@ -260,6 +275,7 @@ public static DeprecationInfoAction.Response from( Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, List> indexSettingsChecks, + List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, List skipTheseDeprecatedSettings @@ -283,6 +299,19 @@ public static DeprecationInfoAction.Response from( } } + List dataStreamNames = indexNameExpressionResolver.dataStreamNames( + state, + IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN + ); + Map> dataStreamIssues = new HashMap<>(); + for (String dataStreamName : dataStreamNames) { + DataStream dataStream = stateWithSkippedSettingsRemoved.metadata().dataStreams().get(dataStreamName); + List issuesForSingleDataStream = filterChecks(dataStreamChecks, c -> c.apply(dataStream, state)); + if (issuesForSingleDataStream.isEmpty() == false) { + dataStreamIssues.put(dataStreamName, issuesForSingleDataStream); + } + } + // WORKAROUND: move transform deprecation issues into cluster_settings List transformDeprecations = pluginSettingIssues.remove( TransformDeprecationChecker.TRANSFORM_DEPRECATION_KEY @@ -291,7 +320,13 @@ public static DeprecationInfoAction.Response from( clusterSettingsIssues.addAll(transformDeprecations); } - return new DeprecationInfoAction.Response(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues, pluginSettingIssues); + return new DeprecationInfoAction.Response( + clusterSettingsIssues, + nodeSettingsIssues, + indexSettingsIssues, + dataStreamIssues, + pluginSettingIssues + ); } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index 91e77762870bf..683c29815399b 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -36,6 +36,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; import static 
org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class TransportDeprecationInfoAction extends TransportMasterNodeReadAction< @@ -134,6 +135,7 @@ protected final void masterOperation( request, response, INDEX_SETTINGS_CHECKS, + DATA_STREAM_CHECKS, CLUSTER_SETTINGS_CHECKS, deprecationIssues, skipTheseDeprecations diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java new file mode 100644 index 0000000000000..d5325fb0ff3a4 --- /dev/null +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; +import static org.hamcrest.Matchers.equalTo; + +public class DataStreamDeprecationChecksTests extends ESTestCase { + + public void testOldIndicesCheck() { + long oldIndexCount = randomIntBetween(1, 100); + long newIndexCount = randomIntBetween(1, 100); + long oldSearchableSnapshotCount = 0; + long oldFullyManagedSearchableSnapshotCount = 0; + long oldPartiallyManagedSearchableSnapshotCount = 0; + List allIndices = new ArrayList<>(); + Map nameToIndexMetadata = new HashMap<>(); + for (int i = 0; i < oldIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.fromId(7170099)); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + if (randomBoolean()) { + oldFullyManagedSearchableSnapshotCount++; + } else { + settingsBuilder.put("index.store.snapshot.partial", true); + oldPartiallyManagedSearchableSnapshotCount++; + } + oldSearchableSnapshotCount++; + } + IndexMetadata oldIndexMetadata = IndexMetadata.builder("old-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + allIndices.add(oldIndexMetadata.getIndex()); + nameToIndexMetadata.put(oldIndexMetadata.getIndex().getName(), oldIndexMetadata); + } + for (int i = 0; i < newIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.current()); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + } + IndexMetadata newIndexMetadata = IndexMetadata.builder("new-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + 
.build(); + allIndices.add(newIndexMetadata.getIndex()); + nameToIndexMetadata.put(newIndexMetadata.getIndex().getName(), newIndexMetadata); + } + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + allIndices, + randomNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + Metadata metadata = Metadata.builder().indices(nameToIndexMetadata).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + DeprecationIssue expected = new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + Map.of( + "backing_indices", + Map.of( + "count", + oldIndexCount + newIndexCount, + "need_upgrading", + Map.of( + "count", + oldIndexCount, + "searchable_snapshots", + Map.of( + "count", + oldSearchableSnapshotCount, + "fully_mounted", + Map.of("count", oldFullyManagedSearchableSnapshotCount), + "partially_mounted", + Map.of("count", oldPartiallyManagedSearchableSnapshotCount) + ) + ) + ) + ) + ); + List issues = DeprecationChecks.filterChecks(DATA_STREAM_CHECKS, c -> c.apply(dataStream, clusterState)); + assertThat(issues, equalTo(singletonList(expected))); + } +} diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 480ac2103fbfa..5750daa8e3673 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -36,7 +37,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -63,6 +66,13 @@ protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); indexIssues.put(randomAlphaOfLength(10), perIndexIssues); } + Map> dataStreamIssues = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 10); i++) { + List perDataStreamIssues = Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) + .limit(randomIntBetween(0, 10)) + .collect(Collectors.toList()); + dataStreamIssues.put(randomAlphaOfLength(10), perDataStreamIssues); + } Map> pluginIssues = new HashMap<>(); for (int i = 0; i < randomIntBetween(0, 10); i++) { List perPluginIssues = 
Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) @@ -70,7 +80,7 @@ protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); pluginIssues.put(randomAlphaOfLength(10), perPluginIssues); } - return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, pluginIssues); + return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, dataStreamIssues, pluginIssues); } @Override @@ -104,9 +114,13 @@ public void testFrom() throws IOException { boolean clusterIssueFound = randomBoolean(); boolean nodeIssueFound = randomBoolean(); boolean indexIssueFound = randomBoolean(); + boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> dataStreamChecks = List.of( + (ds, cs) -> dataStreamIssueFound ? foundIssue : null + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -125,6 +139,7 @@ public void testFrom() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -197,6 +212,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); List> indexSettingsChecks = List.of((idx) -> null); + List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -214,6 +230,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -239,8 +256,15 @@ public void testRemoveSkippedSettings() throws IOException { settingsBuilder.put("some.undeprecated.property", "someValue3"); settingsBuilder.putList("some.undeprecated.list.property", List.of("someValue4", "someValue5")); Settings inputSettings = settingsBuilder.build(); + IndexMetadata dataStreamIndexMetadata = IndexMetadata.builder("ds-test-index-1") + .settings(inputSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(inputSettings).numberOfShards(1).numberOfReplicas(0)) + .put(dataStreamIndexMetadata, true) + .put(DataStream.builder("ds-test", List.of(dataStreamIndexMetadata.getIndex())).build()) .persistentSettings(inputSettings) .build(); @@ -256,6 +280,13 @@ public void testRemoveSkippedSettings() throws IOException { visibleIndexSettings.set(idx.getSettings()); return null; })); + AtomicInteger backingIndicesCount = new AtomicInteger(0); + List> dataStreamChecks = Collections.unmodifiableList( + Arrays.asList((ds, cs) -> { + backingIndicesCount.set(ds.getIndices().size()); + return null; + }) + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -270,6 +301,7 @@ public void testRemoveSkippedSettings() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), 
List.of("some.deprecated.property", "some.other.*.deprecated.property") @@ -288,19 +320,30 @@ public void testRemoveSkippedSettings() throws IOException { Assert.assertTrue(resultIndexSettings.getAsList("some.undeprecated.list.property").equals(List.of("someValue4", "someValue5"))); Assert.assertFalse(resultIndexSettings.hasValue("some.deprecated.property")); Assert.assertFalse(resultIndexSettings.hasValue("some.other.bad.deprecated.property")); + + assertThat(backingIndicesCount.get(), equalTo(1)); } public void testCtorFailure() { Map> indexNames = Stream.generate(() -> randomAlphaOfLength(10)) .limit(10) .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); + Map> dataStreamNames = Stream.generate(() -> randomAlphaOfLength(10)) + .limit(10) + .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); Set shouldCauseFailure = new HashSet<>(RESERVED_NAMES); for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { Map> pluginSettingsIssues = randomSubsetOf(3, shouldCauseFailure).stream() .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); expectThrows( ElasticsearchStatusException.class, - () -> new DeprecationInfoAction.Response(Collections.emptyList(), Collections.emptyList(), indexNames, pluginSettingsIssues) + () -> new DeprecationInfoAction.Response( + Collections.emptyList(), + Collections.emptyList(), + indexNames, + dataStreamNames, + pluginSettingsIssues + ) ); } } diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle index 064e362c77e6c..f96eff5f933c4 100644 --- a/x-pack/plugin/enrich/qa/rest/build.gradle +++ b/x-pack/plugin/enrich/qa/rest/build.gradle @@ -33,7 +33,3 @@ testClusters.configureEach { requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.4.0") } -tasks.named("yamlRestCompatTestTransform").configure({ task -> - task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed") -}) - diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index b126ca8af0e31..4863eea5d5ca3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.hamcrest.Matcher; import org.junit.After; @@ -363,12 +364,12 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingPathFields = new RoutingPathFields(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1] instanceof Number n) { - 
builder.addLong(dimensions[i].toString(), n.longValue()); + routingPathFields.addLong(dimensions[i].toString(), n.longValue()); } else { - builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -382,7 +383,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens } } // Use legacy tsid to make tests easier to understand: - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index bd8bd0f688837..7adafa908ce4f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -471,7 +472,7 @@ public enum Type { return null; } Instant parsed = DateFormatters.from(ISO_DATE_WITH_NANOS.parse(x)).toInstant(); - return parsed.getEpochSecond() * 1_000_000_000 + parsed.getNano(); + return DateUtils.toLong(parsed); }, (l, r) -> l instanceof Long maybeIP ? maybeIP.compareTo((Long) r) : l.toString().compareTo(r.toString()), Long.class), BOOLEAN(Booleans::parseBoolean, Boolean.class), GEO_POINT(x -> x == null ? 
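The CsvTestUtils change above replaces a hand-rolled seconds-times-a-billion conversion with DateUtils.toLong. Centralizing this matters because epoch-nanosecond arithmetic silently wraps a signed long for instants past roughly the year 2262; a sketch of the overflow-safe version using plain java.time (not the Elasticsearch helper itself):

```java
import java.time.Instant;

public class EpochNanos {
    // Overflow-safe epoch-nanosecond conversion: Math.*Exact throws instead of wrapping.
    static long toEpochNanos(Instant instant) {
        return Math.addExact(Math.multiplyExact(instant.getEpochSecond(), 1_000_000_000L), instant.getNano());
    }

    public static void main(String[] args) {
        Instant ts = Instant.parse("2023-10-23T13:52:55.015123456Z");
        System.out.println(toEpochNanos(ts)); // 1698069175015123456, matching the ts_l column later in this diff
    }
}
```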
null : GEO.wktToWkb(x), BytesRef.class), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 2bd7ecc37b034..478c68db68aa7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -64,6 +64,9 @@ public class CsvTestsDataLoader { private static final TestsDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") .withData("sample_data_ts_long.csv") .withTypeMapping(Map.of("@timestamp", "long")); + private static final TestsDataset SAMPLE_DATA_TS_NANOS = SAMPLE_DATA.withIndex("sample_data_ts_nanos") + .withData("sample_data_ts_nanos.csv") + .withTypeMapping(Map.of("@timestamp", "date_nanos")); private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset("missing_ip_sample_data"); private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips"); private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr"); @@ -101,6 +104,7 @@ public class CsvTestsDataLoader { Map.entry(ALERTS.indexName, ALERTS), Map.entry(SAMPLE_DATA_STR.indexName, SAMPLE_DATA_STR), Map.entry(SAMPLE_DATA_TS_LONG.indexName, SAMPLE_DATA_TS_LONG), + Map.entry(SAMPLE_DATA_TS_NANOS.indexName, SAMPLE_DATA_TS_NANOS), Map.entry(MISSING_IP_SAMPLE_DATA.indexName, MISSING_IP_SAMPLE_DATA), Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), Map.entry(CLIENT_CIDR.indexName, CLIENT_CIDR), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv new file mode 100644 index 0000000000000..eb947f27cc1ee --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv @@ -0,0 +1,8 @@ +@timestamp:date_nanos,client_ip:ip,event_duration:long,message:keyword +2023-10-23T13:55:01.543123456Z,172.21.3.15,1756467,Connected to 10.1.0.1 +2023-10-23T13:53:55.832123456Z,172.21.3.15,5033755,Connection error +2023-10-23T13:52:55.015123456Z,172.21.3.15,8268153,Connection error +2023-10-23T13:51:54.732123456Z,172.21.3.15,725448,Connection error +2023-10-23T13:33:34.937123456Z,172.21.0.5,1232382,Disconnected +2023-10-23T12:27:28.948123456Z,172.21.2.113,2764889,Connected to 10.1.0.2 +2023-10-23T12:15:03.360123456Z,172.21.2.162,3450233,Connected to 10.1.0.3 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index de5981df999c7..963245f9f0ea6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1655,8 +1655,9 @@ repeat required_capability: repeat // tag::repeat[] ROW a = "Hello!" 
-| EVAL triple_a = REPEAT(a, 3); +| EVAL triple_a = REPEAT(a, 3) // end::repeat[] +; // tag::repeat-result[] a:keyword | triple_a:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index a51e4fe995fb3..a2fd3f3d5e0da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -44,6 +44,44 @@ FROM sample_data_ts_long 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; +singleIndexTsNanosAsMillis +required_capability: to_date_nanos + +FROM sample_data_ts_nanos +| EVAL @timestamp = TO_DATETIME(@timestamp) +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +singleIndexTsMillisAsNanos +required_capability: to_date_nanos + +FROM sample_data +| EVAL @timestamp = TO_DATE_NANOS(@timestamp) +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +; + +@timestamp:date_nanos | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +2023-10-23T12:27:28.948000000Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + singleIndexIpStats required_capability: casting_operator @@ -529,6 +567,92 @@ sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; + +multiIndexTsNanosRename +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATETIME(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 
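The paired singleIndex tests above pin down the conversion semantics in both directions: TO_DATETIME on a date_nanos value truncates to millisecond precision, while TO_DATE_NANOS on a millisecond value widens losslessly, only gaining trailing zeros. The same asymmetry can be reproduced with plain java.time (a sketch, not the ES|QL implementation):

```java
import java.time.Instant;

public class NanosVsMillis {
    public static void main(String[] args) {
        Instant nanos = Instant.parse("2023-10-23T13:52:55.015123456Z");

        // date_nanos -> datetime direction is lossy: sub-millisecond digits are dropped
        Instant asMillis = Instant.ofEpochMilli(nanos.toEpochMilli());
        System.out.println(asMillis); // 2023-10-23T13:52:55.015Z

        // datetime -> date_nanos direction is lossless: the value only gains trailing zeros
        System.out.printf("%09d%n", asMillis.getNano()); // 015000000
    }
}
```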
+sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexTsNanosRenameToNanos +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATE_NANOS(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date_nanos | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948000000Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543123456Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832123456Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015123456Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732123456Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948123456Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360123456Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexTsNanosRenameToNanosWithFiltering +required_capability: to_date_nanos +required_capability: date_nanos_binary_comparison +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATE_NANOS(@timestamp) +| WHERE ts > TO_DATE_NANOS("2023-10-23T13:00:00Z") +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date_nanos | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T13:55:01.543123456Z | 172.21.3.15 | 1756467 
| Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832123456Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015123456Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732123456Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 1232382 | Disconnected +; + multiIndexTsLongRenameToString required_capability: union_types required_capability: metadata_fields @@ -591,24 +715,57 @@ count:long | @timestamp:date 4 | 2023-10-23T12:00:00.000Z ; +multiIndexTsNanosToDatetimeStats +required_capability: union_types +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos +| EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp)) +| STATS count=count(*) BY @timestamp +| SORT count DESC, @timestamp ASC +| KEEP count, @timestamp +; + +count:long | @timestamp:date +10 | 2023-10-23T13:00:00.000Z +4 | 2023-10-23T12:00:00.000Z +; + +Multi Index millis to nanos stats +required_capability: union_types +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos +| EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATE_NANOS(@timestamp)) +| STATS count=count(*) BY @timestamp +| SORT count DESC, @timestamp ASC +| KEEP count, @timestamp +; + +count:long | @timestamp:date_nanos +10 | 2023-10-23T13:00:00.000Z +4 | 2023-10-23T12:00:00.000Z +; + + multiIndexTsLongStatsDrop required_capability: union_types required_capability: union_types_agg_cast required_capability: casting_operator -FROM sample_data, sample_data_ts_long +FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime | KEEP count ; count:long -2 -2 -2 -2 -2 -2 -2 +3 +3 +3 +3 +3 +3 +3 ; multiIndexTsLongStatsInline2 @@ -616,19 +773,19 @@ required_capability: union_types required_capability: union_types_agg_cast required_capability: casting_operator -FROM sample_data, sample_data_ts_long +FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime | SORT count DESC, `@timestamp::datetime` DESC ; count:long | @timestamp::datetime:datetime -2 | 2023-10-23T13:55:01.543Z -2 | 2023-10-23T13:53:55.832Z -2 | 2023-10-23T13:52:55.015Z -2 | 2023-10-23T13:51:54.732Z -2 | 2023-10-23T13:33:34.937Z -2 | 2023-10-23T12:27:28.948Z -2 | 2023-10-23T12:15:03.360Z +3 | 2023-10-23T13:55:01.543Z +3 | 2023-10-23T13:53:55.832Z +3 | 2023-10-23T13:52:55.015Z +3 | 2023-10-23T13:51:54.732Z +3 | 2023-10-23T13:33:34.937Z +3 | 2023-10-23T12:27:28.948Z +3 | 2023-10-23T12:15:03.360Z ; multiIndexTsLongStatsInline3 @@ -765,28 +922,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, @timestamp DESC ; -_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword -sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 
2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | 
Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexIpStringTsLongDropped @@ -799,28 +963,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, event_duration ASC ; -_index:keyword | event_duration:long | message:keyword -sample_data | 725448 | Connection error -sample_data | 1232382 | Disconnected -sample_data | 1756467 | Connected to 10.1.0.1 -sample_data | 2764889 | Connected to 10.1.0.2 -sample_data | 3450233 | Connected to 10.1.0.3 -sample_data | 5033755 | Connection error -sample_data | 8268153 | Connection error -sample_data_str | 725448 | Connection error -sample_data_str | 1232382 | Disconnected -sample_data_str | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2764889 | Connected to 10.1.0.2 -sample_data_str | 3450233 | Connected to 10.1.0.3 -sample_data_str | 5033755 | Connection error -sample_data_str | 8268153 | Connection error -sample_data_ts_long | 725448 | Connection error -sample_data_ts_long | 1232382 | Disconnected -sample_data_ts_long | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 5033755 | Connection error -sample_data_ts_long | 8268153 | Connection error +_index:keyword | event_duration:long | message:keyword +sample_data | 725448 | Connection error +sample_data | 1232382 | Disconnected +sample_data | 1756467 | Connected to 10.1.0.1 +sample_data | 2764889 | Connected to 10.1.0.2 +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data | 5033755 | Connection error +sample_data | 8268153 | Connection error +sample_data_str | 725448 | Connection error +sample_data_str | 1232382 | Disconnected +sample_data_str | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2764889 | Connected to 10.1.0.2 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_str | 5033755 | Connection error +sample_data_str | 8268153 | Connection error +sample_data_ts_long | 725448 | Connection error +sample_data_ts_long | 1232382 | Disconnected +sample_data_ts_long | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 5033755 | Connection error +sample_data_ts_long | 8268153 | Connection error +sample_data_ts_nanos | 725448 | Connection error +sample_data_ts_nanos | 1232382 | Disconnected +sample_data_ts_nanos | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 5033755 | Connection error +sample_data_ts_nanos | 8268153 | Connection error ; multiIndexIpStringTsLongRename @@ -834,28 +1005,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:date | host_ip:ip | event_duration:long | message:keyword -sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 
172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | ts:date | host_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error 
+sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexIpStringTsLongRenameDropped @@ -868,28 +1046,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, event_duration ASC ; -_index:keyword | event_duration:long | message:keyword -sample_data | 725448 | Connection error -sample_data | 1232382 | Disconnected -sample_data | 1756467 | Connected to 10.1.0.1 -sample_data | 2764889 | Connected to 10.1.0.2 -sample_data | 3450233 | Connected to 10.1.0.3 -sample_data | 5033755 | Connection error -sample_data | 8268153 | Connection error -sample_data_str | 725448 | Connection error -sample_data_str | 1232382 | Disconnected -sample_data_str | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2764889 | Connected to 10.1.0.2 -sample_data_str | 3450233 | Connected to 10.1.0.3 -sample_data_str | 5033755 | Connection error -sample_data_str | 8268153 | Connection error -sample_data_ts_long | 725448 | Connection error -sample_data_ts_long | 1232382 | Disconnected -sample_data_ts_long | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 5033755 | Connection error -sample_data_ts_long | 8268153 | Connection error +_index:keyword | event_duration:long | message:keyword +sample_data | 725448 | Connection error +sample_data | 1232382 | Disconnected +sample_data | 1756467 | Connected to 10.1.0.1 +sample_data | 2764889 | Connected to 10.1.0.2 +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data | 5033755 | Connection error +sample_data | 8268153 | Connection error +sample_data_str | 725448 | Connection error +sample_data_str | 1232382 | Disconnected +sample_data_str | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2764889 | Connected to 10.1.0.2 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_str | 5033755 | Connection error +sample_data_str | 8268153 | Connection error +sample_data_ts_long | 725448 | Connection error +sample_data_ts_long | 1232382 | Disconnected +sample_data_ts_long | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 5033755 | Connection error +sample_data_ts_long | 8268153 | Connection error +sample_data_ts_nanos | 725448 | Connection error +sample_data_ts_nanos | 1232382 | Disconnected +sample_data_ts_nanos | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 5033755 | Connection error +sample_data_ts_nanos | 8268153 | Connection error ; multiIndexIpStringTsLongRenameToString @@ -903,28 +1088,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:keyword | host_ip:keyword | event_duration:long | message:keyword -sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 
2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | ts:keyword | host_ip:keyword | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 
+sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexWhereIpStringTsLong @@ -1002,10 +1194,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -@timestamp:null | client_ip:null | event_duration:long | message:keyword | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k -null | null | 8268153 | Connection error | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +@timestamp:null | client_ip:null | event_duration:long | message:keyword | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +null | null | 8268153 | Connection error | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 ; multiIndexMultiColumnTypesRenameAndKeep @@ -1020,10 +1213,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long -sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 -sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long +sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 | 8268153 ; multiIndexMultiColumnTypesRenameAndDrop @@ -1038,10 +1232,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k -8268153 | sample_data | 2023-10-23T13:52:55.015Z | 
2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +8268153 | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 2419aa83845a8..286ddbaa29a5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -45,6 +45,7 @@ import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; import org.elasticsearch.compute.operator.lookup.QueryList; import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -185,7 +186,7 @@ protected static QueryList termQueryList( return switch (inputDataType) { case IP -> QueryList.ipTermQueryList(field, searchExecutionContext, (BytesRefBlock) block); case DATETIME -> QueryList.dateTermQueryList(field, searchExecutionContext, (LongBlock) block); - default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); + case null, default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); }; } @@ -459,6 +460,10 @@ abstract static class Request { abstract static class TransportRequest extends org.elasticsearch.transport.TransportRequest implements IndicesRequest { final String sessionId; final ShardId shardId; + /** + * For mixed clusters with nodes <8.14, this will be null. + */ + @Nullable final DataType inputDataType; final Page inputPage; final List extractFields; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index f24a16bb63697..2d85b46e33a8c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -127,9 +127,9 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro TaskId parentTaskId = TaskId.readFromStream(in); String sessionId = in.readString(); ShardId shardId = new ShardId(in); - DataType inputDataType = DataType.fromTypeName( - (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown" - ); + DataType inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) + ? 
DataType.fromTypeName(in.readString()) + : null; String matchType = in.readString(); String matchField = in.readString(); Page inputPage; diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index b43d87c17e644..ec04bfdd058f9 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -645,7 +645,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (dimension && numericValue != null) { - context.getDimensions().addUnsignedLong(fieldType().name(), numericValue).validate(context.indexSettings()); + context.getRoutingFields().addUnsignedLong(fieldType().name(), numericValue); } List fields = new ArrayList<>(); diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java index 3dd678046ea5f..324850f158268 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java @@ -29,7 +29,6 @@ public class SecurityWithBasicLicenseIT extends SecurityInBasicRestTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99169") public void testWithBasicLicense() throws Exception { checkLicenseType("basic"); checkSecurityEnabled(false); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 9e36055e917a6..5be00ae3bfa0c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.integration; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -20,6 +21,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; @@ -42,6 +44,7 @@ import org.junit.After; import java.io.ByteArrayInputStream; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -73,6 +76,7 @@ /** * Tests that file settings service can properly add role mappings. 
*/ +@LuceneTestCase.SuppressFileSystems("*") public class RoleMappingFileSettingsIT extends NativeRealmIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -154,32 +158,37 @@ public void cleanUp() { updateClusterSettings(Settings.builder().putNull("indices.recovery.max_bytes_per_sec")); } - public static void writeJSONFile(String node, String json, Logger logger, AtomicLong versionCounter) throws Exception { - writeJSONFile(node, json, logger, versionCounter, true); - } - - public static void writeJSONFileWithoutVersionIncrement(String node, String json, Logger logger, AtomicLong versionCounter) - throws Exception { - writeJSONFile(node, json, logger, versionCounter, false); - } - - private static void writeJSONFile(String node, String json, Logger logger, AtomicLong versionCounter, boolean incrementVersion) - throws Exception { - long version = incrementVersion ? versionCounter.incrementAndGet() : versionCounter.get(); - + public static void writeJSONFile(String node, String json, Logger logger, Long version) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - assertTrue(fileSettingsService.watching()); - - Files.deleteIfExists(fileSettingsService.watchedFile()); Files.createDirectories(fileSettingsService.watchedFileDir()); Path tempFilePath = createTempFile(); + String jsonWithVersion = Strings.format(json, version); logger.info("--> before writing JSON config to node {} with path {}", node, tempFilePath); - logger.info(Strings.format(json, version)); - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + logger.info(jsonWithVersion); + + Files.writeString(tempFilePath, jsonWithVersion); + int retryCount = 0; + do { + try { + // this can fail on Windows because of timing + Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + return; + } catch (IOException e) { + logger.info("--> retrying writing a settings file [{}]", retryCount); + if (retryCount == 4) { // retry 5 times + throw e; + } + Thread.sleep(retryDelay(retryCount)); + retryCount++; + } + } while (true); + } + + private static long retryDelay(int retryCount) { + return 100 * (1 << retryCount) + Randomness.get().nextInt(10); } public static Tuple setupClusterStateListener(String node, String expectedKey) { @@ -320,7 +329,7 @@ public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { ensureGreen(); var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter.incrementAndGet()); assertRoleMappingsSaveOK(savedClusterState.v1(), savedClusterState.v2()); logger.info("---> cleanup cluster settings..."); @@ -333,7 +342,7 @@ public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, 
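The rewritten writeJSONFile above attempts the atomic move up to five times because, as its comment notes, the move can fail on Windows due to timing. The retryDelay helper is bounded exponential backoff with a little jitter; spelled out on its own (a sketch with java.util.Random standing in for Elasticsearch's Randomness):

```java
import java.util.Random;

public class RetryBackoff {
    private static final Random RANDOM = new Random();

    // 100ms doubled per attempt, plus up to 9ms of jitter so concurrent retriers desynchronize.
    static long retryDelay(int retryCount) {
        return 100L * (1L << retryCount) + RANDOM.nextInt(10);
    }

    public static void main(String[] args) {
        // Five attempts wait roughly 100 + 200 + 400 + 800 + 1600 = ~3.1s in total before giving up.
        for (int attempt = 0; attempt < 5; attempt++) {
            System.out.printf("attempt %d -> ~%d ms%n", attempt, retryDelay(attempt));
        }
    }
}
```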
versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -373,7 +382,7 @@ public void testGetRoleMappings() throws Exception { } var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -415,7 +424,8 @@ public void testGetRoleMappings() throws Exception { ); savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, emptyJSON, logger, versionCounter.incrementAndGet()); awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -465,7 +475,7 @@ public void testErrorSaved() throws Exception { // save an empty file to clear any prior state, this ensures we don't get a stale file left over by another test var savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -490,7 +500,8 @@ public void testErrorSaved() throws Exception { } ); - writeJSONFile(internalCluster().getMasterName(), testErrorJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, testErrorJSON, logger, versionCounter.incrementAndGet()); awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -515,7 +526,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { var closeIndexResponse = indicesAdmin().close(new CloseIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get(); assertTrue(closeIndexResponse.isAcknowledged()); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, testJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -550,7 +562,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { } } finally { savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, emptyJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 15892c8d021f0..ef8f2cfc0d411 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.security; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.Tuple; +import org.elasticsearch.integration.RoleMappingFileSettingsIT; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -29,12 +31,11 @@ import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListenerForCleanup; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFileWithoutVersionIncrement; import static org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata.METADATA_NAME_FIELD; import static org.hamcrest.Matchers.containsInAnyOrder; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class FileSettingsRoleMappingsRestartIT extends SecurityIntegTestCase { private static final int MAX_WAIT_TIME_SECONDS = 20; @@ -116,7 +117,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); - writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter.incrementAndGet()); assertRoleMappingsInClusterStateWithAwait( savedClusterState, @@ -196,7 +197,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex Tuple savedClusterState = setupClusterStateListener(masterNode, "everyone_kibana_alone"); awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); - writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter.incrementAndGet()); assertRoleMappingsInClusterStateWithAwait( savedClusterState, @@ -226,7 +227,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex ); // write without version increment and assert that change gets applied on restart - writeJSONFileWithoutVersionIncrement(masterNode, testJSONOnlyUpdatedRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyUpdatedRoleMappings, logger, versionCounter.get()); logger.info("--> restart master"); internalCluster().restartNode(masterNode); ensureGreen(); @@ -288,7 +289,7 @@ private void cleanupClusterStateAndAssertNoMappings(String masterNode) throws Ex var savedClusterState = setupClusterStateListenerForCleanup(masterNode); awaitFileSettingsWatcher(); logger.info("--> remove the role mappings with an empty settings file"); - writeJSONFile(masterNode, emptyJSON, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, emptyJSON, logger, 
versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS); assertTrue(awaitSuccessful); // ensure cluster-state update got propagated to expected version diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java index 63c510062bdad..e7f544399bdf0 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.integration.RoleMappingFileSettingsIT; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -40,7 +41,6 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; @@ -138,7 +138,7 @@ public void testMigrationSuccessful() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings - writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); @@ -170,7 +170,7 @@ public void testMigrationSuccessfulNoOverlap() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings with fallback name, this should block any security migration - writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); @@ -202,7 +202,7 @@ public void testMigrationSuccessfulNoNative() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings with fallback name, this should block any security migration - 
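testFileSettingsReprocessedOnRestartWithoutVersionChange, in the restart IT above, leans on a subtle property of file-based settings that its inline comment spells out: while a node is running, a settings file whose version has not advanced is skipped, but a restart reprocesses whatever is on disk. A toy model of that decision rule (hypothetical, compressed to the bare branch):

```java
public class VersionedFileApplier {
    private long appliedVersion = -1;

    // Apply when the version advanced, or unconditionally right after a restart.
    boolean apply(long fileVersion, boolean justRestarted) {
        if (fileVersion > appliedVersion || justRestarted) {
            appliedVersion = fileVersion;
            return true;
        }
        return false; // same or older version while running: skipped
    }

    public static void main(String[] args) {
        VersionedFileApplier applier = new VersionedFileApplier();
        System.out.println(applier.apply(7, false)); // true  - first apply
        System.out.println(applier.apply(7, false)); // false - unchanged version is a no-op
        System.out.println(applier.apply(7, true));  // true  - restart picks the file up again
    }
}
```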
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java
index a4d94f9762e69..c85684a60e449 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java
@@ -20,6 +20,7 @@
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
+import org.elasticsearch.cluster.version.CompatibilityVersions;
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.ssl.SslConfiguration;
@@ -103,7 +104,7 @@ public void testDoExecute() throws Exception {
             nodeInfos.add(
                 new NodeInfo(
                     Build.current().version(),
-                    TransportVersion.current(),
+                    new CompatibilityVersions(TransportVersion.current(), Map.of()),
                     IndexVersion.current(),
                     Map.of(),
                     null,
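The one-line substitution in both enrollment test files tracks an internal API move: `NodeInfo` now carries a `CompatibilityVersions` value rather than a bare `TransportVersion`, pairing the node's wire version with the mappings versions of its system indices (an empty map in these unit tests). The record below sketches that conceptual shape; the component names and the `Integer` value type are assumptions for illustration, not the exact declaration in `org.elasticsearch.cluster.version`.

    import java.util.Map;

    import org.elasticsearch.TransportVersion;

    // Conceptual shape of the wrapper (names assumed): the node's transport
    // version plus per-system-index mappings versions.
    record CompatibilityVersionsSketch(TransportVersion transportVersion, Map<String, Integer> systemIndexMappingsVersions) {
        // What the edited tests construct: current wire version, no system-index info.
        static CompatibilityVersionsSketch forLocalNode() {
            return new CompatibilityVersionsSketch(TransportVersion.current(), Map.of());
        }
    }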
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java
index dd6c41b0a10eb..383d4e4c9fe9f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
+import org.elasticsearch.cluster.version.CompatibilityVersions;
 import org.elasticsearch.common.BackoffPolicy;
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.SecureString;
@@ -236,7 +237,7 @@ public Answer answerNullHttpInfo(InvocationOnMock invocationO
             List.of(
                 new NodeInfo(
                     Build.current().version(),
-                    TransportVersion.current(),
+                    new CompatibilityVersions(TransportVersion.current(), Map.of()),
                     IndexVersion.current(),
                     Map.of(),
                     null,
@@ -271,7 +272,7 @@ private Answer answerWithInfo(InvocationOnMock invocationOnMo
             List.of(
                 new NodeInfo(
                     Build.current().version(),
-                    TransportVersion.current(),
+                    new CompatibilityVersions(TransportVersion.current(), Map.of()),
                    IndexVersion.current(),
                     Map.of(),
                     null,
diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
index 86575d418e605..1a9eb1fde6c87 100644
--- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
+++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperBuilderContext;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.RoutingPathFields;
 import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper;
 import org.elasticsearch.plugins.SearchPlugin;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -797,12 +798,12 @@ private void assertGeoLine_TSDB(
             ArrayList<GeoPoint> points = testData.pointsForGroup(g);
             ArrayList<Long> timestamps = testData.timestampsForGroup(g);
             for (int i = 0; i < points.size(); i++) {
-                final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null);
-                builder.addString("group_id", testData.groups[g]);
+                var routingFields = new RoutingPathFields(null);
+                routingFields.addString("group_id", testData.groups[g]);
                 ArrayList<Field> fields = new ArrayList<>(
                     Arrays.asList(
                         new SortedDocValuesField("group_id", new BytesRef(testData.groups[g])),
-                        new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildTsidHash().toBytesRef())
+                        new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, routingFields.buildHash().toBytesRef())
                     )
                 );
                 GeoPoint point = points.get(i);