From 6ee9801a99ec9f624992dfacfc118edd867f7f4c Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 13 Aug 2024 13:39:55 +0900 Subject: [PATCH 001/389] Update the intervals query docs (#111808) Since https://github.com/apache/lucene-solr/pull/620, intervals disjunctions are automatically rewritten to handle cases where minimizations can miss valid matches. This change updates the documentation to take this behaviour into account (users don't need to manually pull intervals disjunctions to the top anymore). --- .../query-dsl/intervals-query.asciidoc | 65 ------------------- .../test/search/230_interval_query.yml | 31 +++++++++ 2 files changed, 31 insertions(+), 65 deletions(-) diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 63ba4046a395d..1e3380389d861 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -397,68 +397,3 @@ This query does *not* match a document containing the phrase `hot porridge is salty porridge`, because the intervals returned by the match query for `hot porridge` only cover the initial two terms in this document, and these do not overlap the intervals covering `salty`. - -Another restriction to be aware of is the case of `any_of` rules that contain -sub-rules which overlap. In particular, if one of the rules is a strict -prefix of the other, then the longer rule can never match, which can -cause surprises when used in combination with `max_gaps`. Consider the -following query, searching for `the` immediately followed by `big` or `big bad`, -immediately followed by `wolf`: - -[source,console] --------------------------------------------------- -POST _search -{ - "query": { - "intervals" : { - "my_text" : { - "all_of" : { - "intervals" : [ - { "match" : { "query" : "the" } }, - { "any_of" : { - "intervals" : [ - { "match" : { "query" : "big" } }, - { "match" : { "query" : "big bad" } } - ] } }, - { "match" : { "query" : "wolf" } } - ], - "max_gaps" : 0, - "ordered" : true - } - } - } - } -} --------------------------------------------------- - -Counter-intuitively, this query does *not* match the document `the big bad -wolf`, because the `any_of` rule in the middle only produces intervals -for `big` - intervals for `big bad` being longer than those for `big`, while -starting at the same position, and so being minimized away. 
In these cases, -it's better to rewrite the query so that all of the options are explicitly -laid out at the top level: - -[source,console] --------------------------------------------------- -POST _search -{ - "query": { - "intervals" : { - "my_text" : { - "any_of" : { - "intervals" : [ - { "match" : { - "query" : "the big bad wolf", - "ordered" : true, - "max_gaps" : 0 } }, - { "match" : { - "query" : "the big wolf", - "ordered" : true, - "max_gaps" : 0 } } - ] - } - } - } - } -} --------------------------------------------------- diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml index 82fb18a879346..99bd001bd95e2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml @@ -21,6 +21,10 @@ setup: - '{"text" : "Baby its cold there outside"}' - '{"index": {"_index": "test", "_id": "4"}}' - '{"text" : "Outside it is cold and wet"}' + - '{"index": {"_index": "test", "_id": "5"}}' + - '{"text" : "the big bad wolf"}' + - '{"index": {"_index": "test", "_id": "6"}}' + - '{"text" : "the big wolf"}' --- "Test ordered matching": @@ -444,4 +448,31 @@ setup: prefix: out - match: { hits.total.value: 3 } +--- +"Test rewrite disjunctions": + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - "match": + "query": "the" + - "any_of": + "intervals": + - "match": + "query": "big" + - "match": + "query": "big bad" + - "match": + "query": "wolf" + max_gaps: 0 + ordered: true + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "6" } + - match: { hits.hits.1._id: "5" } From 4f70047ee4b0764270a45ca3fef8a997a42d554c Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Tue, 13 Aug 2024 09:06:48 +0300 Subject: [PATCH 002/389] Give executor to cache instead of string (#111711) Relates ES-8155 --- .../shared/SharedBlobCacheService.java | 6 ++-- .../shared/SharedBlobCacheServiceTests.java | 34 +++++++++---------- .../SearchableSnapshots.java | 2 +- .../AbstractSearchableSnapshotsTestCase.java | 6 ++-- .../store/input/FrozenIndexInputTests.java | 2 +- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 28a5eb164d049..43baf34b04222 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -335,7 +335,7 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics ) { this(environment, settings, threadPool, ioExecutor, blobCacheMetrics, System::nanoTime); @@ -345,12 +345,12 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics, LongSupplier relativeTimeInNanosSupplier ) { this.threadPool = threadPool; - this.ioExecutor = threadPool.executor(ioExecutor); + this.ioExecutor = ioExecutor; long totalFsSize; try { totalFsSize = 
FsProbe.getTotal(Environment.getFileStore(environment.nodeDataPaths()[0])); diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index 6c49b50c06e82..346950d385a40 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -94,7 +94,7 @@ public void testBasicEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -175,7 +175,7 @@ public void testAutoEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -219,7 +219,7 @@ public void testForceEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -253,7 +253,7 @@ public void testForceEvictResponse() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -287,7 +287,7 @@ public void testDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -395,7 +395,7 @@ public void testMassiveDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -470,7 +470,7 @@ public void testGetMultiThreaded() throws IOException { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -550,7 +550,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -618,7 +618,7 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -826,7 +826,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -844,7 +844,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -869,7 +869,7 @@ public void testMaybeEvictLeastUsed() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -967,7 +967,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + 
threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1117,7 +1117,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1278,7 +1278,7 @@ public void testPopulate() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1394,7 +1394,7 @@ public void testUseFullRegionSize() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) { @Override @@ -1435,7 +1435,7 @@ public void testSharedSourceInputStreamFactory() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 18ebe65d87986..4eea006b4c2f2 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -331,7 +331,7 @@ public Collection createComponents(PluginServices services) { nodeEnvironment, settings, threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), new BlobCacheMetrics(services.telemetryProvider().getMeterRegistry()) ); this.frozenCacheService.set(sharedBlobCacheService); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java index 5f083d568fed8..41121453e41a4 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java @@ -144,7 +144,7 @@ protected SharedBlobCacheService defaultFrozenCacheService() { nodeEnvironment, Settings.EMPTY, threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), BlobCacheMetrics.NOOP ); } @@ -167,7 +167,7 @@ protected SharedBlobCacheService randomFrozenCacheService() { singlePathNodeEnvironment, cacheSettings.build(), threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), BlobCacheMetrics.NOOP ); } @@ -192,7 +192,7 @@ protected SharedBlobCacheService createFrozenCacheService(final ByteSi .put(SharedBlobCacheService.SHARED_CACHE_RANGE_SIZE_SETTING.getKey(), cacheRangeSize) .build(), threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), BlobCacheMetrics.NOOP ); } diff --git 
a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java index 81e9c06a149b9..53ea908ad8801 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java @@ -111,7 +111,7 @@ public void testRandomReads() throws IOException { nodeEnvironment, settings, threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), BlobCacheMetrics.NOOP ); CacheService cacheService = randomCacheService(); From 664573ca38842959e222d5b83eb4cd41fa078618 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 08:43:04 +0100 Subject: [PATCH 003/389] Document manual steps in ILM delete phase (#111734) Spells out some cases in which ILM doesn't delete the underlying searchable snapshot and instructs users to delete them manually instead. Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --- docs/reference/ilm/actions/ilm-delete.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/reference/ilm/actions/ilm-delete.asciidoc b/docs/reference/ilm/actions/ilm-delete.asciidoc index eac3b9804709a..beed60105ed96 100644 --- a/docs/reference/ilm/actions/ilm-delete.asciidoc +++ b/docs/reference/ilm/actions/ilm-delete.asciidoc @@ -15,6 +15,18 @@ Deletes the searchable snapshot created in a previous phase. Defaults to `true`. This option is applicable when the <> action is used in any previous phase. ++ +If you set this option to `false`, use the <> to remove {search-snaps} from your snapshot repository when +they are no longer needed. ++ +If you manually delete an index before the {ilm-cap} delete phase runs, then +{ilm-init} will not delete the underlying {search-snap}. Use the +<> to remove the {search-snap} from +your snapshot repository when it is no longer needed. ++ +See <> for +further information about deleting {search-snaps}. WARNING: If a policy with a searchable snapshot action is applied on an existing searchable snapshot index, the snapshot backing this index will NOT be deleted because it was not created by this policy. If you want From d2bd3742d90ca9fa03aa4725cbf2381e3cc0d2ad Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 09:27:09 +0100 Subject: [PATCH 004/389] Add `GetSnapshotsIT#testAllFeatures` (#111786) The features of get-snapshots API are all tested in isolation or small combinations, but there's no one test which pins down exactly how they all interact. This commit adds such a test, to verify that any future optimization work preserves the observable behaviour. 
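For reference, the features exercised here combine on a single request along these lines (a hypothetical example, not part of the change; `repo-*`, `snap-*` and `nightly*` are made-up names):

```console
GET /_snapshot/repo-*/snap-*?slm_policy_filter=nightly*&sort=start_time&order=desc&offset=1&size=2
```

Subsequent pages are then fetched by passing each response's `next` value as `?after` in place of `?offset`.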
Relates #95345 Relates #104607 --- .../snapshots/GetSnapshotsIT.java | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 1130ddaa74f38..66ddd47d7758d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -10,24 +10,41 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; +import org.elasticsearch.action.admin.cluster.snapshots.get.TransportGetSnapshotsAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Predicates; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.threadpool.ThreadPool; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; @@ -745,4 +762,242 @@ private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String[] repoN return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoNames) .setSnapshots("*", "-" + AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*"); } + + public void testAllFeatures() { + // A test that uses (potentially) as many of the features of the get-snapshots API at once as possible, to verify that they interact + // in the expected order etc. 
+ + // Create a few repositories and a few indices + final var repositories = randomList(1, 4, ESTestCase::randomIdentifier); + final var indices = randomList(1, 4, ESTestCase::randomIdentifier); + final var slmPolicies = randomList(1, 4, ESTestCase::randomIdentifier); + + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (final var repository : repositories) { + client().execute( + TransportPutRepositoryAction.TYPE, + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repository).type(FsRepository.TYPE) + .settings(Settings.builder().put("location", randomRepoPath()).build()), + listeners.acquire(ElasticsearchAssertions::assertAcked) + ); + } + + for (final var index : indices) { + client().execute( + TransportCreateIndexAction.TYPE, + new CreateIndexRequest(index, indexSettings(1, 0).build()), + listeners.acquire(ElasticsearchAssertions::assertAcked) + ); + } + } + }); + ensureGreen(); + + // Create a few snapshots + final var snapshotInfos = Collections.synchronizedList(new ArrayList()); + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (int i = 0; i < 10; i++) { + client().execute( + TransportCreateSnapshotAction.TYPE, + new CreateSnapshotRequest( + TEST_REQUEST_TIMEOUT, + // at least one snapshot per repository to satisfy consistency checks + i < repositories.size() ? repositories.get(i) : randomFrom(repositories), + randomIdentifier() + ).indices(randomNonEmptySubsetOf(indices)) + .userMetadata( + randomBoolean() ? Map.of() : Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, randomFrom(slmPolicies)) + ) + .waitForCompletion(true), + listeners.acquire( + createSnapshotResponse -> snapshotInfos.add(Objects.requireNonNull(createSnapshotResponse.getSnapshotInfo())) + ) + ); + } + } + }); + + Predicate snapshotInfoPredicate = Predicates.always(); + + // {repository} path parameter + final String[] requestedRepositories; + if (randomBoolean()) { + requestedRepositories = new String[] { randomFrom("_all", "*") }; + } else { + final var selectedRepositories = Set.copyOf(randomNonEmptySubsetOf(repositories)); + snapshotInfoPredicate = snapshotInfoPredicate.and(si -> selectedRepositories.contains(si.repository())); + requestedRepositories = selectedRepositories.toArray(new String[0]); + } + + // {snapshot} path parameter + final String[] requestedSnapshots; + if (randomBoolean()) { + requestedSnapshots = randomBoolean() ? Strings.EMPTY_ARRAY : new String[] { randomFrom("_all", "*") }; + } else { + final var selectedSnapshots = randomNonEmptySubsetOf(snapshotInfos).stream() + .map(si -> si.snapshotId().getName()) + .collect(Collectors.toSet()); + snapshotInfoPredicate = snapshotInfoPredicate.and(si -> selectedSnapshots.contains(si.snapshotId().getName())); + requestedSnapshots = selectedSnapshots.stream() + // if we have multiple repositories, add a trailing wildcard to each requested snapshot name, because if we specify exact + // names then there must be a snapshot with that name in every requested repository + .map(n -> repositories.size() == 1 && randomBoolean() ? 
n : n + "*") + .toArray(String[]::new); + } + + // ?slm_policy_filter parameter + final String[] requestedSlmPolicies; + switch (between(0, 3)) { + default -> requestedSlmPolicies = Strings.EMPTY_ARRAY; + case 1 -> { + requestedSlmPolicies = new String[] { "*" }; + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) != null + ); + } + case 2 -> { + requestedSlmPolicies = new String[] { "_none" }; + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) == null + ); + } + case 3 -> { + final var selectedPolicies = Set.copyOf(randomNonEmptySubsetOf(slmPolicies)); + requestedSlmPolicies = selectedPolicies.stream() + .map(policy -> randomBoolean() ? policy : policy + "*") + .toArray(String[]::new); + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) instanceof String policy + && selectedPolicies.contains(policy) + ); + } + } + + // ?sort and ?order parameters + final var sortKey = randomFrom(SnapshotSortKey.values()); + final var order = randomFrom(SortOrder.values()); + // NB we sometimes choose to sort by FAILED_SHARDS, but there are no failed shards in these snapshots. We're still testing the + // fallback sorting by snapshot ID in this case. We also have no multi-shard indices so there's no difference between sorting by + // INDICES and by SHARDS. The actual sorting behaviour for these cases is tested elsewhere, here we're just checking that sorting + // interacts correctly with the other parameters to the API. + + // compute the ordered sequence of snapshots which match the repository/snapshot name filters and SLM policy filter + final var selectedSnapshots = snapshotInfos.stream() + .filter(snapshotInfoPredicate) + .sorted(sortKey.getSnapshotInfoComparator(order)) + .toList(); + + final var getSnapshotsRequest = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, requestedRepositories, requestedSnapshots).policies( + requestedSlmPolicies + ) + // apply sorting params + .sort(sortKey) + .order(order); + + // sometimes use ?from_sort_value to skip some items; note that snapshots skipped in this way are subtracted from + // GetSnapshotsResponse.totalCount whereas snapshots skipped by ?after and ?offset are not + final int skippedByFromSortValue; + if (randomBoolean()) { + final var startingSnapshot = randomFrom(snapshotInfos); + getSnapshotsRequest.fromSortValue(switch (sortKey) { + case START_TIME -> Long.toString(startingSnapshot.startTime()); + case NAME -> startingSnapshot.snapshotId().getName(); + case DURATION -> Long.toString(startingSnapshot.endTime() - startingSnapshot.startTime()); + case INDICES, SHARDS -> Integer.toString(startingSnapshot.indices().size()); + case FAILED_SHARDS -> "0"; + case REPOSITORY -> startingSnapshot.repository(); + }); + final Predicate fromSortValuePredicate = snapshotInfo -> { + final var comparison = switch (sortKey) { + case START_TIME -> Long.compare(snapshotInfo.startTime(), startingSnapshot.startTime()); + case NAME -> snapshotInfo.snapshotId().getName().compareTo(startingSnapshot.snapshotId().getName()); + case DURATION -> Long.compare( + snapshotInfo.endTime() - snapshotInfo.startTime(), + startingSnapshot.endTime() - startingSnapshot.startTime() + ); + case INDICES, SHARDS -> Integer.compare(snapshotInfo.indices().size(), startingSnapshot.indices().size()); + case FAILED_SHARDS -> 0; + case REPOSITORY -> 
snapshotInfo.repository().compareTo(startingSnapshot.repository()); + }; + return order == SortOrder.ASC ? comparison < 0 : comparison > 0; + }; + + int skipCount = 0; + for (final var snapshotInfo : selectedSnapshots) { + if (fromSortValuePredicate.test(snapshotInfo)) { + skipCount += 1; + } else { + break; + } + } + skippedByFromSortValue = skipCount; + } else { + skippedByFromSortValue = 0; + } + + // ?offset parameter + if (randomBoolean()) { + getSnapshotsRequest.offset(between(0, selectedSnapshots.size() + 1)); + } + + // ?size parameter + if (randomBoolean()) { + getSnapshotsRequest.size(between(1, selectedSnapshots.size() + 1)); + } + + // compute the expected offset and size of the returned snapshots as indices in selectedSnapshots: + final var expectedOffset = Math.min(selectedSnapshots.size(), skippedByFromSortValue + getSnapshotsRequest.offset()); + final var expectedSize = Math.min( + selectedSnapshots.size() - expectedOffset, + getSnapshotsRequest.size() == GetSnapshotsRequest.NO_LIMIT ? Integer.MAX_VALUE : getSnapshotsRequest.size() + ); + + // get the actual response + final GetSnapshotsResponse getSnapshotsResponse = safeAwait( + l -> client().execute(TransportGetSnapshotsAction.TYPE, getSnapshotsRequest, l) + ); + + // verify it returns the expected results + assertEquals( + selectedSnapshots.stream().skip(expectedOffset).limit(expectedSize).map(SnapshotInfo::snapshotId).toList(), + getSnapshotsResponse.getSnapshots().stream().map(SnapshotInfo::snapshotId).toList() + ); + assertEquals(expectedSize, getSnapshotsResponse.getSnapshots().size()); + assertEquals(selectedSnapshots.size() - skippedByFromSortValue, getSnapshotsResponse.totalCount()); + assertEquals(selectedSnapshots.size() - expectedOffset - expectedSize, getSnapshotsResponse.remaining()); + assertEquals(getSnapshotsResponse.remaining() > 0, getSnapshotsResponse.next() != null); + + // now use ?after to page through the rest of the results + var nextRequestAfter = getSnapshotsResponse.next(); + var nextExpectedOffset = expectedOffset + expectedSize; + var remaining = getSnapshotsResponse.remaining(); + while (nextRequestAfter != null) { + final var nextSize = between(1, remaining); + final var nextRequest = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, requestedRepositories, requestedSnapshots) + // same name/policy filters, same ?sort and ?order params, new ?size, but no ?offset or ?from_sort_value because of ?after + .policies(requestedSlmPolicies) + .sort(sortKey) + .order(order) + .size(nextSize) + .after(SnapshotSortKey.decodeAfterQueryParam(nextRequestAfter)); + final GetSnapshotsResponse nextResponse = safeAwait(l -> client().execute(TransportGetSnapshotsAction.TYPE, nextRequest, l)); + + assertEquals( + selectedSnapshots.stream().skip(nextExpectedOffset).limit(nextSize).map(SnapshotInfo::snapshotId).toList(), + nextResponse.getSnapshots().stream().map(SnapshotInfo::snapshotId).toList() + ); + assertEquals(nextSize, nextResponse.getSnapshots().size()); + assertEquals(selectedSnapshots.size(), nextResponse.totalCount()); + assertEquals(remaining - nextSize, nextResponse.remaining()); + assertEquals(nextResponse.remaining() > 0, nextResponse.next() != null); + + nextRequestAfter = nextResponse.next(); + nextExpectedOffset += nextSize; + remaining -= nextSize; + } + + assertEquals(0, remaining); + } } From 9b061f7805ac03c38f865171e3098fc00428449f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 09:39:50 +0100 Subject: [PATCH 005/389] Rework `EnrichPolicyRunner` in terms of listener chains 
(#111432) Rephrases the implementation of `EnrichPolicyRunner` in terms of `SubscribableListener` chains to make it easier to follow the flow of execution, rather than hiding each step directly within the listener passed to the previous step. --- .../xpack/enrich/EnrichPolicyRunner.java | 318 +++++++++--------- .../xpack/enrich/EnrichPolicyRunnerTests.java | 22 +- 2 files changed, 172 insertions(+), 168 deletions(-) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 0891f24feda68..cab115ddc4964 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -16,20 +16,27 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.FilterClient; import org.elasticsearch.client.internal.OriginSettingClient; @@ -125,21 +132,36 @@ public class EnrichPolicyRunner { } public void run(ActionListener listener) { - try { - logger.info("Policy [{}]: Running enrich policy", policyName); - task.setStatus(new ExecuteEnrichPolicyStatus(ExecuteEnrichPolicyStatus.PolicyPhases.RUNNING)); - // Collect the source index information - final String[] sourceIndices = policy.getIndices().toArray(new String[0]); - logger.debug("Policy [{}]: Checking source indices [{}]", policyName, sourceIndices); - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(sourceIndices); - // This call does not set the origin to ensure that the user executing the policy has permission to access the source index - client.admin().indices().getIndex(getIndexRequest, listener.delegateFailureAndWrap((l, getIndexResponse) -> { + 
logger.info("Policy [{}]: Running enrich policy", policyName); + task.setStatus(new ExecuteEnrichPolicyStatus(ExecuteEnrichPolicyStatus.PolicyPhases.RUNNING)); + + SubscribableListener + + .newForked(l -> { + // Collect the source index information + final String[] sourceIndices = policy.getIndices().toArray(new String[0]); + logger.debug("Policy [{}]: Checking source indices [{}]", policyName, sourceIndices); + GetIndexRequest getIndexRequest = new GetIndexRequest().indices(sourceIndices); + // This call does not set the origin to ensure that the user executing the policy has permission to access the source index + client.admin().indices().getIndex(getIndexRequest, l); + }) + .andThen((l, getIndexResponse) -> { validateMappings(getIndexResponse); prepareAndCreateEnrichIndex(toMappings(getIndexResponse), clusterService.getSettings(), l); - })); - } catch (Exception e) { - listener.onFailure(e); - } + }) + .andThen(this::prepareReindexOperation) + .andThen(this::transferDataToEnrichIndex) + .andThen(this::forceMergeEnrichIndex) + .andThen(this::setIndexReadOnly) + .andThen(this::waitForIndexGreen) + .andThen(this::updateEnrichPolicyAlias) + .andThenApply(r -> { + logger.info("Policy [{}]: Policy execution complete", policyName); + ExecuteEnrichPolicyStatus completeStatus = new ExecuteEnrichPolicyStatus(ExecuteEnrichPolicyStatus.PolicyPhases.COMPLETE); + task.setStatus(completeStatus); + return completeStatus; + }) + .addListener(listener); } private static List> toMappings(GetIndexResponse response) { @@ -433,7 +455,7 @@ static boolean isIndexableField(MapperService mapperService, String field, Strin private void prepareAndCreateEnrichIndex( List> mappings, Settings settings, - ActionListener listener + ActionListener listener ) { int numberOfReplicas = settings.getAsInt(ENRICH_MIN_NUMBER_OF_REPLICAS_NAME, 0); Settings enrichIndexSettings = Settings.builder() @@ -447,28 +469,20 @@ private void prepareAndCreateEnrichIndex( CreateIndexRequest createEnrichIndexRequest = new CreateIndexRequest(enrichIndexName, enrichIndexSettings); createEnrichIndexRequest.mapping(createEnrichMapping(mappings)); logger.debug("Policy [{}]: Creating new enrich index [{}]", policyName, enrichIndexName); - enrichOriginClient().admin() - .indices() - .create( - createEnrichIndexRequest, - listener.delegateFailure((l, createIndexResponse) -> prepareReindexOperation(enrichIndexName, l)) - ); + enrichOriginClient().admin().indices().create(createEnrichIndexRequest, listener); } - private void prepareReindexOperation(final String destinationIndexName, ActionListener listener) { + private void prepareReindexOperation(ActionListener listener) { // Check to make sure that the enrich pipeline exists, and create it if it is missing. 
- if (EnrichPolicyReindexPipeline.exists(clusterService.state()) == false) { - EnrichPolicyReindexPipeline.create( - enrichOriginClient(), - listener.delegateFailure((l, r) -> transferDataToEnrichIndex(destinationIndexName, l)) - ); + if (EnrichPolicyReindexPipeline.exists(clusterService.state())) { + listener.onResponse(null); } else { - transferDataToEnrichIndex(destinationIndexName, listener); + EnrichPolicyReindexPipeline.create(enrichOriginClient(), listener); } } - private void transferDataToEnrichIndex(final String destinationIndexName, ActionListener listener) { - logger.debug("Policy [{}]: Transferring source data to new enrich index [{}]", policyName, destinationIndexName); + private void transferDataToEnrichIndex(ActionListener listener) { + logger.debug("Policy [{}]: Transferring source data to new enrich index [{}]", policyName, enrichIndexName); // Filter down the source fields to just the ones required by the policy final Set retainFields = new HashSet<>(); retainFields.add(policy.getMatchField()); @@ -479,7 +493,7 @@ private void transferDataToEnrichIndex(final String destinationIndexName, Action if (policy.getQuery() != null) { searchSourceBuilder.query(QueryBuilders.wrapperQuery(policy.getQuery().getQuery())); } - ReindexRequest reindexRequest = new ReindexRequest().setDestIndex(destinationIndexName) + ReindexRequest reindexRequest = new ReindexRequest().setDestIndex(enrichIndexName) .setSourceIndices(policy.getIndices().toArray(new String[0])); reindexRequest.getSearchRequest().source(searchSourceBuilder); reindexRequest.getDestination().source(new BytesArray(new byte[0]), XContentType.SMILE); @@ -536,147 +550,142 @@ public void onResponse(BulkByScrollResponse bulkByScrollResponse) { "Policy [{}]: Transferred [{}] documents to enrich index [{}]", policyName, bulkByScrollResponse.getCreated(), - destinationIndexName + enrichIndexName ); - forceMergeEnrichIndex(destinationIndexName, 1, delegate); + delegate.onResponse(null); } } }); } - private void forceMergeEnrichIndex( - final String destinationIndexName, - final int attempt, - ActionListener listener - ) { + private void forceMergeEnrichIndex(ActionListener listener) { + forceMergeEnrichIndexOrRetry(1, listener); + } + + private void forceMergeEnrichIndexOrRetry(final int attempt, ActionListener listener) { logger.debug( "Policy [{}]: Force merging newly created enrich index [{}] (Attempt {}/{})", policyName, - destinationIndexName, + enrichIndexName, attempt, maxForceMergeAttempts ); - enrichOriginClient().admin() - .indices() - .forceMerge( - new ForceMergeRequest(destinationIndexName).maxNumSegments(1), - listener.delegateFailure((l, r) -> refreshEnrichIndex(destinationIndexName, attempt, l)) - ); - } - private void refreshEnrichIndex( - final String destinationIndexName, - final int attempt, - ActionListener listener - ) { - logger.debug("Policy [{}]: Refreshing enrich index [{}]", policyName, destinationIndexName); - enrichOriginClient().admin() - .indices() - .refresh( - new RefreshRequest(destinationIndexName), - listener.delegateFailure((l, r) -> ensureSingleSegment(destinationIndexName, attempt, l)) - ); - } - - protected void ensureSingleSegment( - final String destinationIndexName, - final int attempt, - ActionListener listener - ) { - enrichOriginClient().admin() - .indices() - .segments(new IndicesSegmentsRequest(destinationIndexName), listener.delegateFailureAndWrap((l, indicesSegmentResponse) -> { - int failedShards = indicesSegmentResponse.getFailedShards(); - if (failedShards > 0) { - // Encountered a 
problem while querying the segments for the enrich index. Try and surface the problem in the log. - logger.warn( - "Policy [{}]: Encountered [{}] shard level failures while querying the segments for enrich index [{}]. " - + "Turn on DEBUG logging for details.", - policyName, - failedShards, - enrichIndexName - ); - if (logger.isDebugEnabled()) { - DefaultShardOperationFailedException[] shardFailures = indicesSegmentResponse.getShardFailures(); - int failureNumber = 1; - String logPrefix = "Policy [" + policyName + "]: Encountered shard failure ["; - String logSuffix = " of " - + shardFailures.length - + "] while querying segments for enrich index [" - + enrichIndexName - + "]. Shard ["; - for (DefaultShardOperationFailedException shardFailure : shardFailures) { + SubscribableListener + + .newForked( + l -> enrichOriginClient().admin().indices().forceMerge(new ForceMergeRequest(enrichIndexName).maxNumSegments(1), l) + ) + .andThen(this::refreshEnrichIndex) + .andThen(this::afterRefreshEnrichIndex) + .andThen(this::getSegments) + .andThenApply(this::getSegmentCount) + .addListener( + // delegateFailureAndWrap() rather than andThen().addListener() to avoid building unnecessary O(#retries) listener chain + listener.delegateFailureAndWrap((l, segmentCount) -> { + if (segmentCount > 1) { + int nextAttempt = attempt + 1; + if (nextAttempt > maxForceMergeAttempts) { + throw new ElasticsearchException( + "Force merging index [{}] attempted [{}] times but did not result in one segment.", + enrichIndexName, + attempt, + maxForceMergeAttempts + ); + } else { logger.debug( - logPrefix + failureNumber + logSuffix + shardFailure.index() + "][" + shardFailure.shardId() + "]", - shardFailure.getCause() + "Policy [{}]: Force merge result contains more than one segment [{}], retrying (attempt {}/{})", + policyName, + segmentCount, + nextAttempt, + maxForceMergeAttempts ); - failureNumber++; + // TransportForceMergeAction always forks so no risk of stack overflow from this recursion + forceMergeEnrichIndexOrRetry(nextAttempt, l); } - } - } - IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(destinationIndexName); - if (indexSegments == null) { - if (indicesSegmentResponse.getShardFailures().length == 0) { - throw new ElasticsearchException( - "Could not locate segment information for newly created index [{}]", - destinationIndexName - ); } else { - DefaultShardOperationFailedException shardFailure = indicesSegmentResponse.getShardFailures()[0]; - throw new ElasticsearchException( - "Could not obtain segment information for newly created index [{}]; shard info [{}][{}]", - shardFailure.getCause(), - destinationIndexName, - shardFailure.index(), - shardFailure.shardId() - ); + l.onResponse(null); } + }) + ); + } + + private void refreshEnrichIndex(ActionListener listener) { + logger.debug("Policy [{}]: Refreshing enrich index [{}]", policyName, enrichIndexName); + enrichOriginClient().admin().indices().refresh(new RefreshRequest(enrichIndexName), listener); + } + + // hook to allow testing force-merge retries + protected void afterRefreshEnrichIndex(ActionListener listener) { + listener.onResponse(null); + } + + private void getSegments(ActionListener listener) { + enrichOriginClient().admin().indices().segments(new IndicesSegmentsRequest(enrichIndexName), listener); + } + + private int getSegmentCount(IndicesSegmentResponse indicesSegmentResponse) { + int failedShards = indicesSegmentResponse.getFailedShards(); + if (failedShards > 0) { + // Encountered a problem while querying the segments 
for the enrich index. Try and surface the problem in the log. + logger.warn( + "Policy [{}]: Encountered [{}] shard level failures while querying the segments for enrich index [{}]. " + + "Turn on DEBUG logging for details.", + policyName, + failedShards, + enrichIndexName + ); + if (logger.isDebugEnabled()) { + DefaultShardOperationFailedException[] shardFailures = indicesSegmentResponse.getShardFailures(); + int failureNumber = 1; + String logPrefix = "Policy [" + policyName + "]: Encountered shard failure ["; + String logSuffix = " of " + + shardFailures.length + + "] while querying segments for enrich index [" + + enrichIndexName + + "]. Shard ["; + for (DefaultShardOperationFailedException shardFailure : shardFailures) { + logger.debug( + logPrefix + failureNumber + logSuffix + shardFailure.index() + "][" + shardFailure.shardId() + "]", + shardFailure.getCause() + ); + failureNumber++; } - Map indexShards = indexSegments.getShards(); - assert indexShards.size() == 1 : "Expected enrich index to contain only one shard"; - ShardSegments[] shardSegments = indexShards.get(0).shards(); - assert shardSegments.length == 1 : "Expected enrich index to contain no replicas at this point"; - ShardSegments primarySegments = shardSegments[0]; - if (primarySegments.getSegments().size() > 1) { - int nextAttempt = attempt + 1; - if (nextAttempt > maxForceMergeAttempts) { - throw new ElasticsearchException( - "Force merging index [{}] attempted [{}] times but did not result in one segment.", - destinationIndexName, - attempt, - maxForceMergeAttempts - ); - } else { - logger.debug( - "Policy [{}]: Force merge result contains more than one segment [{}], retrying (attempt {}/{})", - policyName, - primarySegments.getSegments().size(), - nextAttempt, - maxForceMergeAttempts - ); - forceMergeEnrichIndex(destinationIndexName, nextAttempt, listener); - } - } else { - // Force merge down to one segment successful - setIndexReadOnly(destinationIndexName, listener); - } - })); + } + } + IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(enrichIndexName); + if (indexSegments == null) { + if (indicesSegmentResponse.getShardFailures().length == 0) { + throw new ElasticsearchException("Could not locate segment information for newly created index [{}]", enrichIndexName); + } else { + DefaultShardOperationFailedException shardFailure = indicesSegmentResponse.getShardFailures()[0]; + throw new ElasticsearchException( + "Could not obtain segment information for newly created index [{}]; shard info [{}][{}]", + shardFailure.getCause(), + enrichIndexName, + shardFailure.index(), + shardFailure.shardId() + ); + } + } + Map indexShards = indexSegments.getShards(); + assert indexShards.size() == 1 : "Expected enrich index to contain only one shard"; + ShardSegments[] shardSegments = indexShards.get(0).shards(); + assert shardSegments.length == 1 : "Expected enrich index to contain no replicas at this point"; + ShardSegments primarySegments = shardSegments[0]; + return primarySegments.getSegments().size(); } - private void setIndexReadOnly(final String destinationIndexName, ActionListener listener) { - logger.debug("Policy [{}]: Setting new enrich index [{}] to be read only", policyName, destinationIndexName); - UpdateSettingsRequest request = new UpdateSettingsRequest(destinationIndexName).setPreserveExisting(true) + private void setIndexReadOnly(ActionListener listener) { + logger.debug("Policy [{}]: Setting new enrich index [{}] to be read only", policyName, enrichIndexName); + UpdateSettingsRequest 
request = new UpdateSettingsRequest(enrichIndexName).setPreserveExisting(true) .settings(Settings.builder().put("index.auto_expand_replicas", "0-all").put("index.blocks.write", "true")); - enrichOriginClient().admin() - .indices() - .updateSettings(request, listener.delegateFailure((l, r) -> waitForIndexGreen(destinationIndexName, l))); + enrichOriginClient().admin().indices().updateSettings(request, listener); } - private void waitForIndexGreen(final String destinationIndexName, ActionListener listener) { - ClusterHealthRequest request = new ClusterHealthRequest(destinationIndexName).waitForGreenStatus(); - enrichOriginClient().admin() - .cluster() - .health(request, listener.delegateFailureAndWrap((l, r) -> updateEnrichPolicyAlias(destinationIndexName, l))); + private void waitForIndexGreen(ActionListener listener) { + ClusterHealthRequest request = new ClusterHealthRequest(enrichIndexName).waitForGreenStatus(); + enrichOriginClient().admin().cluster().health(request, listener); } /** @@ -730,12 +739,12 @@ private void validateIndexBeforePromotion(String destinationIndexName, ClusterSt } } - private void updateEnrichPolicyAlias(final String destinationIndexName, ActionListener listener) { + private void updateEnrichPolicyAlias(ActionListener listener) { String enrichIndexBase = EnrichPolicy.getBaseName(policyName); - logger.debug("Policy [{}]: Promoting new enrich index [{}] to alias [{}]", policyName, destinationIndexName, enrichIndexBase); + logger.debug("Policy [{}]: Promoting new enrich index [{}] to alias [{}]", policyName, enrichIndexName, enrichIndexBase); GetAliasesRequest aliasRequest = new GetAliasesRequest(enrichIndexBase); ClusterState clusterState = clusterService.state(); - validateIndexBeforePromotion(destinationIndexName, clusterState); + validateIndexBeforePromotion(enrichIndexName, clusterState); String[] concreteIndices = indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(clusterState, aliasRequest); String[] aliases = aliasRequest.aliases(); IndicesAliasesRequest aliasToggleRequest = new IndicesAliasesRequest(); @@ -743,13 +752,8 @@ private void updateEnrichPolicyAlias(final String destinationIndexName, ActionLi if (indices.length > 0) { aliasToggleRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove().indices(indices).alias(enrichIndexBase)); } - aliasToggleRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add().index(destinationIndexName).alias(enrichIndexBase)); - enrichOriginClient().admin().indices().aliases(aliasToggleRequest, listener.safeMap(r -> { - logger.info("Policy [{}]: Policy execution complete", policyName); - ExecuteEnrichPolicyStatus completeStatus = new ExecuteEnrichPolicyStatus(ExecuteEnrichPolicyStatus.PolicyPhases.COMPLETE); - task.setStatus(completeStatus); - return completeStatus; - })); + aliasToggleRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add().index(enrichIndexName).alias(enrichIndexBase)); + enrichOriginClient().admin().indices().aliases(aliasToggleRequest, listener); } /** diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 75e10e7069563..d3dcd7ae36f59 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -1805,23 +1805,23 @@ public void run(ActionListener listener) { } 
@Override - protected void ensureSingleSegment( - String destinationIndexName, - int attempt, - ActionListener listener - ) { - forceMergeAttempts.incrementAndGet(); + protected void afterRefreshEnrichIndex(ActionListener listener) { + final var attempt = forceMergeAttempts.incrementAndGet(); if (attempt == 1) { // Put and flush a document to increase the number of segments, simulating not // all segments were merged on the first try. - DocWriteResponse indexRequest = client().index( + client().index( new IndexRequest().index(createdEnrichIndex) .source(unmergedDocument) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - ).actionGet(); - assertEquals(RestStatus.CREATED, indexRequest.status()); + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + listener.delegateFailureAndWrap((l, response) -> { + assertEquals(RestStatus.CREATED, response.status()); + super.afterRefreshEnrichIndex(l); + }) + ); + } else { + super.afterRefreshEnrichIndex(listener); } - super.ensureSingleSegment(destinationIndexName, attempt, listener); } }; From 03384b5f6ffdbafbb57c261c798911303801c4d1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 10:39:18 +0100 Subject: [PATCH 006/389] Generalize `UnsafePlainActionFuture` slightly (#111826) There's no need to specialize this class for the one- and two-executor cases. This commit generalizes it to accept any collection of executors, and expands the comments a little. --- .../support/UnsafePlainActionFuture.java | 35 +++++++------------ 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java index dfbd4f2b1801a..b76dfe07e18ed 100644 --- a/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java @@ -10,44 +10,33 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; -import java.util.Objects; +import java.util.Set; /** * An unsafe future. You should not need to use this for new code, rather you should be able to convert that code to be async * or use a clear hierarchy of thread pool executors around the future. - * + *
+ * <p>
* This future is unsafe, since it allows notifying the future on the same thread pool executor that it is being waited on. This * is a common deadlock scenario, since all threads may be waiting and thus no thread may be able to complete the future. + *
+ * <p>
+ * Note that the deadlock protection in {@link PlainActionFuture} is very weak. In general there's a risk of deadlock if there's any cycle + * of threads which block/complete on each other's futures, or dispatch work to each other, but this is much harder to detect. */ @Deprecated(forRemoval = true) public class UnsafePlainActionFuture extends PlainActionFuture { - - private final String unsafeExecutor; - private final String unsafeExecutor2; - - /** - * Allow the single executor passed to be used unsafely. This allows waiting for the future and completing the future on threads in - * the same executor, but only for the specific executor. - */ - public UnsafePlainActionFuture(String unsafeExecutor) { - this(unsafeExecutor, "__none__"); - } + private final Set unsafeExecutors; /** - * Allow both executors passed to be used unsafely. This allows waiting for the future and completing the future on threads in - * the same executor, but only for the two specific executors. + * Create a future which permits any of the given named executors to be used unsafely (i.e. used for both waiting for the future's + * completion and completing the future). */ - public UnsafePlainActionFuture(String unsafeExecutor, String unsafeExecutor2) { - Objects.requireNonNull(unsafeExecutor); - Objects.requireNonNull(unsafeExecutor2); - this.unsafeExecutor = unsafeExecutor; - this.unsafeExecutor2 = unsafeExecutor2; + public UnsafePlainActionFuture(String... unsafeExecutors) { + assert unsafeExecutors.length > 0 : "use PlainActionFuture if there are no executors to use unsafely"; + this.unsafeExecutors = Set.of(unsafeExecutors); } @Override boolean allowedExecutors(Thread blockedThread, Thread completingThread) { - return super.allowedExecutors(blockedThread, completingThread) - || unsafeExecutor.equals(EsExecutors.executorName(blockedThread)) - || unsafeExecutor2.equals(EsExecutors.executorName(blockedThread)); + return super.allowedExecutors(blockedThread, completingThread) || unsafeExecutors.contains(EsExecutors.executorName(blockedThread)); } } From aa24d02a291a9755abdfd55314bd069ca1f7f10c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 12:58:05 +0100 Subject: [PATCH 007/389] Improve some `execute(Runnable)` invocations (#111832) Fixes a few spots where we're submitting to an executor a bare `Runnable` that completes a listener, replacing them all with an appropriate `ActionRunnable` util. 
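To sketch the difference (a minimal illustration, not code from this change; `doWork` and the listener type are made up): `ActionRunnable` extends `AbstractRunnable`, so a task that fails or is rejected by the executor still completes the listener exceptionally, which a bare `Runnable` does not guarantee.

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;

import java.util.concurrent.Executor;

class Sketch {
    // Before: the listener is completed only if the submitted task actually runs.
    static void submitBare(Executor executor, ActionListener<String> listener) {
        executor.execute(() -> ActionListener.completeWith(listener, Sketch::doWork));
    }

    // After: rejection by the executor also notifies the listener, via AbstractRunnable#onRejection.
    static void submitWrapped(Executor executor, ActionListener<String> listener) {
        executor.execute(ActionRunnable.supply(listener, Sketch::doWork));
    }

    // Stand-in for the real work being dispatched.
    private static String doWork() {
        return "result";
    }
}
```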
--- .../main/java/org/elasticsearch/indices/IndicesService.java | 3 ++- .../java/org/elasticsearch/action/ActionRunnableTests.java | 4 +--- .../elasticsearch/cluster/NodeConnectionsServiceTests.java | 5 +++-- .../common/util/CancellableSingleObjectCacheTests.java | 3 ++- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 3016530292766..82a5c96bb7dc2 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -18,6 +18,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ResolvedIndices; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; @@ -399,7 +400,7 @@ protected void doStop() { final CountDownLatch latch = new CountDownLatch(indices.size()); for (final Index index : indices) { indicesStopExecutor.execute( - () -> ActionListener.run( + ActionRunnable.wrap( ActionListener.assertOnce(ActionListener.releasing(latch::countDown)), l -> removeIndex( index, diff --git a/server/src/test/java/org/elasticsearch/action/ActionRunnableTests.java b/server/src/test/java/org/elasticsearch/action/ActionRunnableTests.java index ebc9205f34d05..f2fd54e240e24 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionRunnableTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionRunnableTests.java @@ -47,11 +47,9 @@ public void testWrapReleasingNotRejected() throws Exception { assertEquals("simulated", e.getMessage()); assertTrue(releaseListener.isDone()); l.onResponse(null); - }), () -> safeReleaseListener.onResponse(null), l -> executor.execute(() -> ActionListener.completeWith(l, () -> { + }), () -> safeReleaseListener.onResponse(null), l -> executor.execute(ActionRunnable.run(l, () -> { if (randomBoolean()) { throw new ElasticsearchException("simulated"); - } else { - return null; } })))); diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 4f1c5b7fa5dc5..8cb2f7c0a7ce1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -607,12 +608,12 @@ private void runConnectionBlock(CheckedRunnable connectionBlock) thro public void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener) { final CheckedRunnable connectionBlock = nodeConnectionBlocks.get(node); if (profile == null && randomConnectionExceptions && randomBoolean()) { - threadPool.generic().execute(() -> ActionListener.completeWith(listener, () -> { + threadPool.generic().execute(ActionRunnable.run(listener, () -> { 
runConnectionBlock(connectionBlock); throw new ConnectTransportException(node, "simulated"); })); } else { - threadPool.generic().execute(() -> ActionListener.completeWith(listener, () -> { + threadPool.generic().execute(ActionRunnable.supply(listener, () -> { runConnectionBlock(connectionBlock); return new Connection() { private final SubscribableListener closeListener = new SubscribableListener<>(); diff --git a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java index b038b6effd08f..c744b01fdc82c 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -202,7 +203,7 @@ protected void refresh( BooleanSupplier supersedeIfStale, ActionListener listener ) { - threadPool.generic().execute(() -> ActionListener.completeWith(listener, () -> { + threadPool.generic().execute(ActionRunnable.supply(listener, () -> { ensureNotCancelled.run(); if (s.equals("FAIL")) { throw new ElasticsearchException("simulated"); From 9ac0718d9033e0eee1e682612a73a2fdfe896fa7 Mon Sep 17 00:00:00 2001 From: Larisa Motova Date: Tue, 13 Aug 2024 01:59:03 -1000 Subject: [PATCH 008/389] Add docs for shard level stats in node stats (#111082) Fixes #111081 --- docs/reference/cluster/nodes-stats.asciidoc | 136 ++++++++++++++++++ docs/reference/rest-api/common-parms.asciidoc | 5 +- 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index f188a5f2ddf04..61c58cea95b83 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -842,6 +842,142 @@ This is not shown for the `shards` level, since mappings may be shared across th ======= +`shards`:: +(object) When the `shards` level is requested, contains the aforementioned `indices` statistics for every shard (per +index, and then per shard ID), as well as the following shard-specific statistics (which are not shown when the +requested level is higher than `shards`): ++ +.Additional shard-specific statistics for the `shards` level +[%collapsible%open] +======= + +`routing`:: +(object) Contains routing information about the shard. ++ +.Properties of `routing` +[%collapsible%open] +======== + +`state`:: +(string) State of the shard. Returned values are: ++ +* `INITIALIZING`: The shard is initializing/recovering. +* `RELOCATING`: The shard is relocating. +* `STARTED`: The shard has started. +* `UNASSIGNED`: The shard is not assigned to any node. + +`primary`:: +(Boolean) Whether the shard is a primary shard or not. + +`node`:: +(string) ID of the node the shard is allocated to. + +`relocating_node`:: +(string) ID of the node the shard is either relocating to or relocating from, or null if shard is not relocating. + +======== + +`commit`:: +(object) Contains information regarding the last commit point of the shard. ++ +.Properties of `commit` +[%collapsible%open] +======== + +`id`:: +(string) Base64 version of the commit ID. 
+ +`generation`:: +(integer) Lucene generation of the commit. + +`user_data`:: +(object) Contains additional technical information about the commit. + +`num_docs`:: +(integer) The number of docs in the commit. + +======== + +`seq_no`:: +(object) Contains information about <> and checkpoints for the shard. ++ +.Properties of `seq_no` +[%collapsible%open] +======== + +`max_seq_no`:: +(integer) The maximum sequence number issued so far. + +`local_checkpoint`:: +(integer) The current local checkpoint of the shard. + +`global_checkpoint`:: +(integer) The current global checkpoint of the shard. + +======== + +`retention_leases`:: +(object) Contains information about <>. ++ +.Properties of `retention_leases` +[%collapsible%open] +======== + +`primary_term`:: +(integer) The primary term of this retention lease collection. + +`version`:: +(integer) The current version of the retention lease collection. + +`leases`:: +(array of objects) List of current leases for this shard. ++ +.Properties of `leases` +[%collapsible%open] +========= + +`id`:: +(string) The ID of the lease. + +`retaining_seq_no`:: +(integer) The minimum sequence number to be retained by the lease. + +`timestamp`:: +(integer) The timestamp of when the lease was created or renewed. +Recorded in milliseconds since the {wikipedia}/Unix_time[Unix Epoch]. + +`source`:: +(string) The source of the lease. + +========= +======== + +`shard_path`:: +(object) ++ +.Properties of `shard_path` +[%collapsible%open] +======== + +`state_path`:: +(string) The state-path root, without the index name and the shard ID. + +`data_path`:: +(string) The data-path root, without the index name and the shard ID. + +`is_custom_data_path`:: +(boolean) Whether the data path is a custom data location and therefore outside of the nodes configured data paths. + +======== + +`search_idle`:: +(boolean) Whether the shard is <> or not. + +`search_idle_time`:: +(integer) Time since previous searcher access. +Recorded in milliseconds. + +======= ====== [[cluster-nodes-stats-api-response-body-os]] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 7c2e42a26b923..e5ab10b7d71ba 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -649,8 +649,9 @@ tag::level[] + -- (Optional, string) -Indicates whether statistics are aggregated -at the cluster, index, or shard level. +Indicates whether statistics are aggregated at the cluster, index, or shard level. +If the shards level is requested, some additional +<> are shown. 
Valid values are: From adbd3672871d5f42b152fc64e4786d14b4c1c56d Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 13 Aug 2024 07:34:46 -0700 Subject: [PATCH 009/389] Fix DateFieldMapperTests#testBlockLoaderFromColumnReaderWithSyntheticSource (#111816) --- .../index/mapper/DateFieldMapperTests.java | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index d9894df9104a1..9cfdb2c46a291 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -731,7 +731,18 @@ public void execute() { @Override protected Function loadBlockExpected() { - return v -> ((Number) v).longValue(); + return v -> asJacksonNumberOutput(((Number) v).longValue()); + } + + protected static Object asJacksonNumberOutput(long l) { + // If a long value fits in int, Jackson will write it as int in NumberOutput.outputLong() + // and we hit this during serialization of expected values. + // Code below mimics that behaviour in order for matching to work. + if (l < 0 && l >= Integer.MIN_VALUE || l >= 0 && l <= Integer.MAX_VALUE) { + return (int) l; + } else { + return l; + } } public void testLegacyField() throws Exception { From 495eebaa63a8dd667480be74cf2988d7875d77bb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 13 Aug 2024 16:42:13 +0200 Subject: [PATCH 010/389] Save syscalls in Checkpoint.read (#111819) No need to use a real directory here. We use a plain channel for writing, we can do the same for reading since the checkpoints are tiny. This is mostly helpful in saving hundreds of seconds in syscalls in internal cluster tests. 
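Condensed, the new read path looks like this (a simplified sketch of the change below; error handling and the translog-version dispatch are elided):

[source,java]
----
// Read the whole (tiny) checkpoint file in one syscall instead of opening
// an NIOFSDirectory and an IndexInput over it.
byte[] bytes = Files.readAllBytes(path);
try (ByteArrayIndexInput in = new ByteArrayIndexInput(path.toString(), bytes, 0, bytes.length)) {
    // Checksum the entire file before parsing anything from it.
    CodecUtil.checksumEntireFile(in);
    int fileVersion = CodecUtil.checkHeader(in, CHECKPOINT_CODEC, VERSION_LUCENE_8, CURRENT_VERSION);
    // ... dispatch to readCheckpointV3/readCheckpointV4 based on fileVersion ...
}
----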
--- .../elasticsearch/index/translog/Checkpoint.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index 3f21c3a26ea04..7e1de5a9bc77c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -15,17 +15,15 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.OutputStreamIndexOutput; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.index.seqno.SequenceNumbers; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.channels.FileChannel; +import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.OpenOption; import java.nio.file.Path; @@ -166,8 +164,9 @@ public String toString() { } public static Checkpoint read(Path path) throws IOException { - try (Directory dir = new NIOFSDirectory(path.getParent())) { - try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) { + try { + final byte[] bytes = Files.readAllBytes(path); + try (ByteArrayIndexInput indexInput = new ByteArrayIndexInput(path.toString(), bytes, 0, bytes.length)) { // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here. CodecUtil.checksumEntireFile(indexInput); final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, VERSION_LUCENE_8, CURRENT_VERSION); @@ -177,9 +176,9 @@ public static Checkpoint read(Path path) throws IOException { return Checkpoint.readCheckpointV4(indexInput); } return readCheckpointV3(indexInput); - } catch (CorruptIndexException | NoSuchFileException | IndexFormatTooOldException | IndexFormatTooNewException e) { - throw new TranslogCorruptedException(path.toString(), e); } + } catch (CorruptIndexException | NoSuchFileException | IndexFormatTooOldException | IndexFormatTooNewException e) { + throw new TranslogCorruptedException(path.toString(), e); } } From 82520a166bcbafc57508d3453e140115a297e939 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 13 Aug 2024 17:53:30 +0200 Subject: [PATCH 011/389] Revert "[cache] Support async RangeMissingHandler callbacks (#111340)" (#111839) * Revert "[cache] Support async RangeMissingHandler callbacks (#111340)" This reverts commit 364bba2e6b975d9247b54e91b4ced5a8b5abe6bb. * Revert "Give executor to cache instead of string (#111711)" This reverts commit 4f70047ee4b0764270a45ca3fef8a997a42d554c. 
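The net effect on call sites (reassembled from the hunks below): after the revert, callers pass the executor's name again and the cache resolves it internally via `threadPool.executor(name)`.

[source,java]
----
new SharedBlobCacheService<>(
    nodeEnvironment,
    settings,
    threadPool,
    SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, // the name again, not threadPool.executor(...)
    BlobCacheMetrics.NOOP
);
----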
--- .../shared/SharedBlobCacheService.java | 107 +++----- .../shared/SharedBlobCacheServiceTests.java | 250 ++++++------------ .../SearchableSnapshots.java | 2 +- .../store/input/FrozenIndexInput.java | 59 ++--- .../AbstractSearchableSnapshotsTestCase.java | 6 +- .../store/input/FrozenIndexInputTests.java | 2 +- 6 files changed, 148 insertions(+), 278 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 43baf34b04222..3d95db72e269d 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -335,7 +335,7 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - Executor ioExecutor, + String ioExecutor, BlobCacheMetrics blobCacheMetrics ) { this(environment, settings, threadPool, ioExecutor, blobCacheMetrics, System::nanoTime); @@ -345,12 +345,12 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - Executor ioExecutor, + String ioExecutor, BlobCacheMetrics blobCacheMetrics, LongSupplier relativeTimeInNanosSupplier ) { this.threadPool = threadPool; - this.ioExecutor = ioExecutor; + this.ioExecutor = threadPool.executor(ioExecutor); long totalFsSize; try { totalFsSize = FsProbe.getTotal(Environment.getFileStore(environment.nodeDataPaths()[0])); @@ -646,14 +646,13 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, int wri // no need to allocate a new capturing lambda if the offset isn't adjusted return writer; } - return (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> writer.fillCacheRange( + return (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> writer.fillCacheRange( channel, channelPos, streamFactory, relativePos - writeOffset, len, - progressUpdater, - completionListener + progressUpdater ); } @@ -988,17 +987,16 @@ void populateAndRead( executor.execute(fillGapRunnable(gap, writer, null, refs.acquireListener())); } } else { - var gapFillingListener = refs.acquireListener(); - try (var gfRefs = new RefCountingRunnable(ActionRunnable.run(gapFillingListener, streamFactory::close))) { - final List gapFillingTasks = gaps.stream() - .map(gap -> fillGapRunnable(gap, writer, streamFactory, gfRefs.acquireListener())) - .toList(); - executor.execute(() -> { + final List gapFillingTasks = gaps.stream() + .map(gap -> fillGapRunnable(gap, writer, streamFactory, refs.acquireListener())) + .toList(); + executor.execute(() -> { + try (streamFactory) { // Fill the gaps in order. If a gap fails to fill for whatever reason, the task for filling the next // gap will still be executed. 
gapFillingTasks.forEach(Runnable::run); - }); - } + } + }); } } } @@ -1007,13 +1005,13 @@ void populateAndRead( } } - private Runnable fillGapRunnable( + private AbstractRunnable fillGapRunnable( SparseFileTracker.Gap gap, RangeMissingHandler writer, @Nullable SourceInputStreamFactory streamFactory, ActionListener listener ) { - return () -> ActionListener.run(listener, l -> { + return ActionRunnable.run(listener.delegateResponse((l, e) -> failGapAndListener(gap, l, e)), () -> { var ioRef = io; assert regionOwners.get(ioRef) == CacheFileRegion.this; assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; @@ -1024,15 +1022,10 @@ private Runnable fillGapRunnable( streamFactory, start, Math.toIntExact(gap.end() - start), - progress -> gap.onProgress(start + progress), - l.map(unused -> { - assert regionOwners.get(ioRef) == CacheFileRegion.this; - assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; - writeCount.increment(); - gap.onCompletion(); - return null; - }).delegateResponse((delegate, e) -> failGapAndListener(gap, delegate, e)) + progress -> gap.onProgress(start + progress) ); + writeCount.increment(); + gap.onCompletion(); }); } @@ -1120,23 +1113,12 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater, - ActionListener completionListener + IntConsumer progressUpdater ) throws IOException { - writer.fillCacheRange( - channel, - channelPos, - streamFactory, - relativePos, - length, - progressUpdater, - completionListener.map(unused -> { - var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); - blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); - blobCacheMetrics.getCacheMissCounter().increment(); - return null; - }) - ); + writer.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); + var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); + SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); + SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); } }; if (rangeToRead.isEmpty()) { @@ -1229,18 +1211,9 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater, - ActionListener completionListener + IntConsumer progressUpdater ) throws IOException { - delegate.fillCacheRange( - channel, - channelPos, - streamFactory, - relativePos - writeOffset, - len, - progressUpdater, - completionListener - ); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos - writeOffset, len, progressUpdater); } }; } @@ -1253,25 +1226,14 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater, - ActionListener completionListener + IntConsumer progressUpdater ) throws IOException { assert assertValidRegionAndLength(fileRegion, channelPos, len); - delegate.fillCacheRange( - channel, - channelPos, - streamFactory, - relativePos, - len, - progressUpdater, - Assertions.ENABLED ? 
ActionListener.runBefore(completionListener, () -> { - assert regionOwners.get(fileRegion.io) == fileRegion - : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; - }) : completionListener - ); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, len, progressUpdater); + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; } }; - } return adjustedWriter; } @@ -1358,7 +1320,6 @@ default SourceInputStreamFactory sharedInputStreamFactory(List completionListener + IntConsumer progressUpdater ) throws IOException; } @@ -1379,9 +1339,9 @@ public interface SourceInputStreamFactory extends Releasable { /** * Create the input stream at the specified position. * @param relativePos the relative position in the remote storage to read from. - * @param listener listener for the input stream ready to be read from. + * @return the input stream ready to be read from. */ - void create(int relativePos, ActionListener listener) throws IOException; + InputStream create(int relativePos) throws IOException; } private abstract static class DelegatingRangeMissingHandler implements RangeMissingHandler { @@ -1403,10 +1363,9 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater, - ActionListener completionListener + IntConsumer progressUpdater ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index 346950d385a40..e477673c90d6d 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.StoppableExecutorServiceWrapper; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; @@ -73,13 +72,6 @@ private static long size(long numPages) { return numPages * SharedBytes.PAGE_SIZE; } - private static void completeWith(ActionListener listener, CheckedRunnable runnable) { - ActionListener.completeWith(listener, () -> { - runnable.run(); - return null; - }); - } - public void testBasicEviction() throws IOException { Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") @@ -94,7 +86,7 @@ public void testBasicEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -123,10 +115,7 @@ public void testBasicEviction() throws IOException { ByteRange.of(0L, 1L), ByteRange.of(0L, 1L), (channel, channelPos, relativePos, length) -> 1, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - 
completionListener, - () -> progressUpdater.accept(length) - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), taskQueue.getThreadPool().generic(), bytesReadFuture ); @@ -175,7 +164,7 @@ public void testAutoEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -219,7 +208,7 @@ public void testForceEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -253,7 +242,7 @@ public void testForceEvictResponse() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -287,7 +276,7 @@ public void testDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -395,7 +384,7 @@ public void testMassiveDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -470,7 +459,7 @@ public void testGetMultiThreaded() throws IOException { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -550,7 +539,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -563,14 +552,11 @@ public void execute(Runnable command) { cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(-length); - progressUpdater.accept(length); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(-length); + progressUpdater.accept(length); + }, bulkExecutor, future ); @@ -584,15 +570,9 @@ public void execute(Runnable command) { // a download that would use up all regions should not run final var cacheKey = generateCacheKey(); assertEquals(2, cacheService.freeRegionCount()); - var configured = cacheService.maybeFetchFullEntry( - cacheKey, - size(500), - (ch, chPos, streamFactory, relPos, len, update, completionListener) -> completeWith(completionListener, () -> { - throw new AssertionError("Should never reach here"); - }), - bulkExecutor, - ActionListener.noop() - ); + var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, streamFactory, relPos, len, update) -> { + throw new AssertionError("Should never reach here"); + }, bulkExecutor, ActionListener.noop()); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } @@ -618,7 +598,7 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -633,14 +613,9 @@ public void 
testFetchFullCacheEntryConcurrently() throws Exception { (ActionListener listener) -> cacheService.maybeFetchFullEntry( cacheKey, size, - ( - channel, - channelPos, - streamFactory, - relativePos, - length, - progressUpdater, - completionListener) -> completeWith(completionListener, () -> progressUpdater.accept(length)), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept( + length + ), bulkExecutor, listener ) @@ -826,7 +801,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -844,7 +819,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -869,7 +844,7 @@ public void testMaybeEvictLeastUsed() throws Exception { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -884,10 +859,7 @@ public void testMaybeEvictLeastUsed() throws Exception { var entry = cacheService.get(cacheKey, regionSize, 0); entry.populate( ByteRange.of(0L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> progressUpdater.accept(length) - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), taskQueue.getThreadPool().generic(), ActionListener.noop() ); @@ -967,7 +939,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -982,14 +954,11 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, bulkExecutor, future ); @@ -1016,14 +985,11 @@ public void execute(Runnable command) { cacheKey, region, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, bulkExecutor, listener ); @@ -1044,12 +1010,9 @@ public void execute(Runnable command) { cacheKey, randomIntBetween(0, 10), randomLongBetween(1L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - throw new AssertionError("should not be executed"); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + throw new 
AssertionError("should not be executed"); + }, bulkExecutor, future ); @@ -1069,14 +1032,11 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - assert ignore == null : ignore; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - } - ), + (channel, channelPos, ignore, relativePos, length, progressUpdater) -> { + assert ignore == null : ignore; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + }, bulkExecutor, future ); @@ -1117,7 +1077,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -1150,15 +1110,12 @@ public void execute(Runnable command) { region, range, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); - assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); - assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); - bytesCopied.addAndGet(length); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); + assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); + assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); + bytesCopied.addAndGet(length); + }, bulkExecutor, future ); @@ -1193,10 +1150,7 @@ public void execute(Runnable command) { region, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> bytesCopied.addAndGet(length) - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), bulkExecutor, listener ); @@ -1219,12 +1173,9 @@ public void execute(Runnable command) { randomIntBetween(0, 10), ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - throw new AssertionError("should not be executed"); - } - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + throw new AssertionError("should not be executed"); + }, bulkExecutor, future ); @@ -1245,10 +1196,7 @@ public void execute(Runnable command) { 0, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> bytesCopied.addAndGet(length) - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), bulkExecutor, future ); @@ -1278,7 +1226,7 @@ public void testPopulate() throws Exception { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -1289,18 +1237,10 @@ public void testPopulate() throws Exception { var entry = cacheService.get(cacheKey, blobLength, 0); AtomicLong bytesWritten = new AtomicLong(0L); final PlainActionFuture future1 = new 
PlainActionFuture<>(); - entry.populate( - ByteRange.of(0, regionSize - 1), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - } - ), - taskQueue.getThreadPool().generic(), - future1 - ); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future1); assertThat(future1.isDone(), is(false)); assertThat(taskQueue.hasRunnableTasks(), is(true)); @@ -1308,34 +1248,18 @@ public void testPopulate() throws Exception { // start populating the second region entry = cacheService.get(cacheKey, blobLength, 1); final PlainActionFuture future2 = new PlainActionFuture<>(); - entry.populate( - ByteRange.of(0, regionSize - 1), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - } - ), - taskQueue.getThreadPool().generic(), - future2 - ); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future2); // start populating again the first region, listener should be called immediately entry = cacheService.get(cacheKey, blobLength, 0); final PlainActionFuture future3 = new PlainActionFuture<>(); - entry.populate( - ByteRange.of(0, regionSize - 1), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - } - ), - taskQueue.getThreadPool().generic(), - future3 - ); + entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + }, taskQueue.getThreadPool().generic(), future3); assertThat(future3.isDone(), is(true)); var written = future3.get(10L, TimeUnit.SECONDS); @@ -1394,7 +1318,7 @@ public void testUseFullRegionSize() throws IOException { environment, settings, taskQueue.getThreadPool(), - taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) { @Override @@ -1435,7 +1359,7 @@ public void testSharedSourceInputStreamFactory() throws Exception { environment, settings, threadPool, - threadPool.executor(ThreadPool.Names.GENERIC), + ThreadPool.Names.GENERIC, BlobCacheMetrics.NOOP ) ) { @@ -1453,10 +1377,7 @@ public void testSharedSourceInputStreamFactory() throws Exception { range, range, (channel, channelPos, relativePos, length) -> length, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( - completionListener, - () -> progressUpdater.accept(length) - ), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), EsExecutors.DIRECT_EXECUTOR_SERVICE, future ); @@ -1473,8 +1394,8 @@ public void testSharedSourceInputStreamFactory() throws Exception { final var factoryClosed = new AtomicBoolean(false); final var dummyStreamFactory = new SourceInputStreamFactory() { 
@Override - public void create(int relativePos, ActionListener listener) { - listener.onResponse(null); + public InputStream create(int relativePos) { + return null; } @Override @@ -1499,20 +1420,17 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater, - ActionListener completion + IntConsumer progressUpdater ) throws IOException { - completeWith(completion, () -> { - if (invocationCounter.incrementAndGet() == 1) { - final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); - assertThat(witness, nullValue()); - } else { - assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); - } - assertThat(streamFactory, sameInstance(dummyStreamFactory)); - assertThat(position.getAndSet(relativePos), lessThan(relativePos)); - progressUpdater.accept(length); - }); + if (invocationCounter.incrementAndGet() == 1) { + final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); + assertThat(witness, nullValue()); + } else { + assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); + } + assertThat(streamFactory, sameInstance(dummyStreamFactory)); + assertThat(position.getAndSet(relativePos), lessThan(relativePos)); + progressUpdater.accept(length); } }; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 4eea006b4c2f2..18ebe65d87986 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -331,7 +331,7 @@ public Collection createComponents(PluginServices services) { nodeEnvironment, settings, threadPool, - threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), + SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, new BlobCacheMetrics(services.telemetryProvider().getMeterRegistry()) ); this.frozenCacheService.set(sharedBlobCacheService); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index d7cf22a05981f..56efc72f2f6f7 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteBufferReference; import org.elasticsearch.blobcache.common.ByteRange; @@ -147,38 +146,32 @@ private void readWithoutBlobCacheSlow(ByteBuffer b, long position, int length) t final int read = SharedBytes.readCacheFile(channel, pos, relativePos, len, byteBufferReference); stats.addCachedBytesRead(read); return read; - }, - (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> 
ActionListener.completeWith( - completionListener, - () -> { - assert streamFactory == null : streamFactory; - final long startTimeNanos = stats.currentTimeNanos(); - try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { - assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - logger.trace( - "{}: writing channel {} pos {} length {} (details: {})", - fileInfo.physicalName(), - channelPos, - relativePos, - len, - cacheFile - ); - SharedBytes.copyToCacheFileAligned( - channel, - input, - channelPos, - relativePos, - len, - progressUpdater, - writeBuffer.get().clear() - ); - final long endTimeNanos = stats.currentTimeNanos(); - stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); - return null; - } - } - ) - ); + }, (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> { + assert streamFactory == null : streamFactory; + final long startTimeNanos = stats.currentTimeNanos(); + try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { + assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); + logger.trace( + "{}: writing channel {} pos {} length {} (details: {})", + fileInfo.physicalName(), + channelPos, + relativePos, + len, + cacheFile + ); + SharedBytes.copyToCacheFileAligned( + channel, + input, + channelPos, + relativePos, + len, + progressUpdater, + writeBuffer.get().clear() + ); + final long endTimeNanos = stats.currentTimeNanos(); + stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); + } + }); assert bytesRead == length : bytesRead + " vs " + length; byteBufferReference.finish(bytesRead); } finally { diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java index 41121453e41a4..5f083d568fed8 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java @@ -144,7 +144,7 @@ protected SharedBlobCacheService defaultFrozenCacheService() { nodeEnvironment, Settings.EMPTY, threadPool, - threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), + SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, BlobCacheMetrics.NOOP ); } @@ -167,7 +167,7 @@ protected SharedBlobCacheService randomFrozenCacheService() { singlePathNodeEnvironment, cacheSettings.build(), threadPool, - threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), + SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, BlobCacheMetrics.NOOP ); } @@ -192,7 +192,7 @@ protected SharedBlobCacheService createFrozenCacheService(final ByteSi .put(SharedBlobCacheService.SHARED_CACHE_RANGE_SIZE_SETTING.getKey(), cacheRangeSize) .build(), threadPool, - threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), + SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, BlobCacheMetrics.NOOP ); } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java 
b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java index 53ea908ad8801..81e9c06a149b9 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java @@ -111,7 +111,7 @@ public void testRandomReads() throws IOException { nodeEnvironment, settings, threadPool, - threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), + SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, BlobCacheMetrics.NOOP ); CacheService cacheService = randomCacheService(); From a03affe40cbc7d6870013fbd2366c30a209cd313 Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Tue, 13 Aug 2024 18:51:49 +0200 Subject: [PATCH 012/389] [DOCS] Fixes the description of 'affected_resources' in health API documentation (#111833) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixed the description of 'affected_resources' in health API documentation * Update docs/reference/health/health.asciidoc Co-authored-by: István Zoltán Szabó --------- Co-authored-by: István Zoltán Szabó --- docs/reference/health/health.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index 6ac7bd2001d45..34714e80e1b18 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -204,9 +204,8 @@ for health status set `verbose` to `false` to disable the more expensive analysi `help_url` field. `affected_resources`:: - (Optional, array of strings) If the root cause pertains to multiple resources in the - cluster (like indices, shards, nodes, etc...) this will hold all resources that this - diagnosis is applicable for. + (Optional, object) An object where the keys represent resource types (for example, indices, shards), + and the values are lists of the specific resources affected by the issue. `help_url`:: (string) A link to the troubleshooting guide that'll fix the health problem. From 097fc0654f9305e01402a06c82926bb04ebe5495 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 13 Aug 2024 20:07:52 +0200 Subject: [PATCH 013/389] Add maximum nested depth check to WKT parser (#111843) * Add maximum nested depth check to WKT parser This prevents StackOverflowErrors, replacing them with ParseException errors, which is more easily managed by running servers. 
* Update docs/changelog/111843.yaml --- docs/changelog/111843.yaml | 5 ++ .../geometry/utils/WellKnownText.java | 54 +++++++++---------- .../geometry/GeometryCollectionTests.java | 27 ++++++++++ 3 files changed, 57 insertions(+), 29 deletions(-) create mode 100644 docs/changelog/111843.yaml diff --git a/docs/changelog/111843.yaml b/docs/changelog/111843.yaml new file mode 100644 index 0000000000000..c8b20036520f3 --- /dev/null +++ b/docs/changelog/111843.yaml @@ -0,0 +1,5 @@ +pr: 111843 +summary: Add maximum nested depth check to WKT parser +area: Geo +type: bug +issues: [] diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java index d233dcc81a3fc..1e7ac3f8097e9 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java @@ -43,6 +43,7 @@ public class WellKnownText { public static final String RPAREN = ")"; public static final String COMMA = ","; public static final String NAN = "NaN"; + public static final int MAX_NESTED_DEPTH = 1000; private static final String NUMBER = ""; private static final String EOF = "END-OF-STREAM"; @@ -425,7 +426,7 @@ public static Geometry fromWKT(GeometryValidator validator, boolean coerce, Stri tokenizer.whitespaceChars('\r', '\r'); tokenizer.whitespaceChars('\n', '\n'); tokenizer.commentChar('#'); - Geometry geometry = parseGeometry(tokenizer, coerce); + Geometry geometry = parseGeometry(tokenizer, coerce, 0); validator.validate(geometry); return geometry; } finally { @@ -436,40 +437,35 @@ public static Geometry fromWKT(GeometryValidator validator, boolean coerce, Stri /** * parse geometry from the stream tokenizer */ - private static Geometry parseGeometry(StreamTokenizer stream, boolean coerce) throws IOException, ParseException { + private static Geometry parseGeometry(StreamTokenizer stream, boolean coerce, int depth) throws IOException, ParseException { final String type = nextWord(stream).toLowerCase(Locale.ROOT); - switch (type) { - case "point": - return parsePoint(stream); - case "multipoint": - return parseMultiPoint(stream); - case "linestring": - return parseLine(stream); - case "multilinestring": - return parseMultiLine(stream); - case "polygon": - return parsePolygon(stream, coerce); - case "multipolygon": - return parseMultiPolygon(stream, coerce); - case "bbox": - return parseBBox(stream); - case "geometrycollection": - return parseGeometryCollection(stream, coerce); - case "circle": // Not part of the standard, but we need it for internal serialization - return parseCircle(stream); - } - throw new IllegalArgumentException("Unknown geometry type: " + type); - } - - private static GeometryCollection parseGeometryCollection(StreamTokenizer stream, boolean coerce) throws IOException, - ParseException { + return switch (type) { + case "point" -> parsePoint(stream); + case "multipoint" -> parseMultiPoint(stream); + case "linestring" -> parseLine(stream); + case "multilinestring" -> parseMultiLine(stream); + case "polygon" -> parsePolygon(stream, coerce); + case "multipolygon" -> parseMultiPolygon(stream, coerce); + case "bbox" -> parseBBox(stream); + case "geometrycollection" -> parseGeometryCollection(stream, coerce, depth + 1); + case "circle" -> // Not part of the standard, but we need it for internal serialization + parseCircle(stream); + default -> throw new IllegalArgumentException("Unknown geometry type: " + type); + }; + } + 
+ private static GeometryCollection parseGeometryCollection(StreamTokenizer stream, boolean coerce, int depth) + throws IOException, ParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return GeometryCollection.EMPTY; } + if (depth > MAX_NESTED_DEPTH) { + throw new ParseException("maximum nested depth of " + MAX_NESTED_DEPTH + " exceeded", stream.lineno()); + } List shapes = new ArrayList<>(); - shapes.add(parseGeometry(stream, coerce)); + shapes.add(parseGeometry(stream, coerce, depth)); while (nextCloserOrComma(stream).equals(COMMA)) { - shapes.add(parseGeometry(stream, coerce)); + shapes.add(parseGeometry(stream, coerce, depth)); } return new GeometryCollection<>(shapes); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java index 6a7bda7f9e0bb..b3f7aa610153b 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java @@ -19,6 +19,8 @@ import java.util.Arrays; import java.util.Collections; +import static org.hamcrest.Matchers.containsString; + public class GeometryCollectionTests extends BaseGeometryTestCase> { @Override protected GeometryCollection createTestInstance(boolean hasAlt) { @@ -65,6 +67,31 @@ public void testInitValidation() { StandardValidator.instance(true).validate(new GeometryCollection(Collections.singletonList(new Point(20, 10, 30)))); } + public void testDeeplyNestedCollection() throws IOException, ParseException { + String wkt = makeDeeplyNestedGeometryCollectionWKT(WellKnownText.MAX_NESTED_DEPTH); + Geometry parsed = WellKnownText.fromWKT(GeographyValidator.instance(true), true, wkt); + assertEquals(WellKnownText.MAX_NESTED_DEPTH, countNestedGeometryCollections((GeometryCollection) parsed)); + } + + public void testTooDeeplyNestedCollection() { + String wkt = makeDeeplyNestedGeometryCollectionWKT(WellKnownText.MAX_NESTED_DEPTH + 1); + ParseException ex = expectThrows(ParseException.class, () -> WellKnownText.fromWKT(GeographyValidator.instance(true), true, wkt)); + assertThat(ex.getMessage(), containsString("maximum nested depth of " + WellKnownText.MAX_NESTED_DEPTH)); + } + + private String makeDeeplyNestedGeometryCollectionWKT(int depth) { + return "GEOMETRYCOLLECTION (".repeat(depth) + "POINT (20.0 10.0)" + ")".repeat(depth); + } + + private int countNestedGeometryCollections(GeometryCollection geometry) { + int count = 1; + while (geometry.get(0) instanceof GeometryCollection g) { + count += 1; + geometry = g; + } + return count; + } + @Override protected GeometryCollection mutateInstance(GeometryCollection instance) { return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 From 656b5db3d1e70d3b97d41948212c7e7ef3b4e8d6 Mon Sep 17 00:00:00 2001 From: James Baiera Date: Tue, 13 Aug 2024 15:24:42 -0400 Subject: [PATCH 014/389] Fix failure store pipeline-level failure recording issues (#111802) Adds additional tests and fixes some edge cases related to rerouting documents in ingest and persisting their failures to failure stores. 
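For orientation, the reroute scenario the new tests exercise can be set up with a pipeline like this (illustrative; the pipeline and data stream names match the tests that follow):

[source,console]
----
PUT _ingest/pipeline/send_to_destination
{
  "processors": [
    { "reroute": { "destination": "destination-data-stream" } }
  ]
}
----

Broadly, when a later step fails after such a reroute, the failure document lands in the failure store of the data stream that was current at the point of failure, while `document.index` records the index the client originally targeted, as the assertions below illustrate.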
--------- Co-authored-by: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> --- .../190_failure_store_redirection.yml | 431 +++++++++++++++++- .../elasticsearch/ElasticsearchException.java | 6 + .../org/elasticsearch/TransportVersions.java | 1 + .../bulk/FailureStoreDocumentConverter.java | 14 +- .../ingest/IngestPipelineException.java | 37 ++ .../elasticsearch/ingest/IngestService.java | 52 ++- .../ExceptionSerializationTests.java | 2 + .../FailureStoreDocumentConverterTests.java | 2 +- 8 files changed, 513 insertions(+), 32 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/ingest/IngestPipelineException.java diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 0b3007021cad8..991504b27f65f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -318,4 +318,433 @@ teardown: index: .fs-destination-* - length: { hits.hits: 1 } - match: { hits.hits.0._index: "/\\.fs-destination-data-stream-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } - - match: { hits.hits.0._source.document.index: 'destination-data-stream' } + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + +--- +"Failure redirects to original failure store during index change if self referenced": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "failing_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set": { + "field": "_index", + "value": "logs-elsewhere" + } + }, + { + "script": { + "source": "ctx.object.data = ctx.object" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "failing_pipeline" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + object: + data: + field: 'someValue' + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" 
} + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.object.data.field: 'someValue' } + - match: { hits.hits.0._source.error.type: 'illegal_argument_exception' } + - contains: { hits.hits.0._source.error.message: 'Failed to generate the source document for ingest pipeline' } + - contains: { hits.hits.0._source.error.stack_trace: 'Failed to generate the source document for ingest pipeline' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'failing_pipeline' } + - match: { hits.hits.0._source.error.pipeline: 'failing_pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged + +--- +"Failure redirects to original failure store during index change if final pipeline changes target": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "change_index_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set": { + "field": "_index", + "value": "logs-elsewhere" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + final_pipeline: "change_index_pipeline" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'illegal_state_exception' } + - contains: { hits.hits.0._source.error.message: "final pipeline [change_index_pipeline] can't change the target index" } + - contains: { hits.hits.0._source.error.stack_trace: "final 
pipeline [change_index_pipeline] can't change the target index" } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'change_index_pipeline' } + - match: { hits.hits.0._source.error.pipeline: 'change_index_pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged + +--- +"Failure redirects to correct failure store when index loop is detected": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "send_to_destination" + body: > + { + "description": "_description", + "processors": [ + { + "reroute": { + "tag": "reroute-tag-1", + "destination": "destination-data-stream" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "send_back_to_original" + body: > + { + "description": "_description", + "processors": [ + { + "reroute": { + "tag": "reroute-tag-2", + "destination": "logs-foobar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "send_to_destination" + + - do: + allowed_warnings: + - "index template [destination_logs_template] has index patterns [destination-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [destination_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: destination_logs_template + body: + index_patterns: destination-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "send_back_to_original" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + + - do: + indices.get_data_stream: + name: destination-data-stream + - match: { data_streams.0.name: destination-data-stream } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-destination-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-destination-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: destination-data-stream + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-destination-data-stream-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-destination-data-stream-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { 
hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'illegal_state_exception' } + - contains: { hits.hits.0._source.error.message: 'index cycle detected' } + - contains: { hits.hits.0._source.error.stack_trace: 'index cycle detected' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'send_back_to_original' } + - match: { hits.hits.0._source.error.pipeline: 'send_back_to_original' } + + - do: + indices.delete_data_stream: + name: destination-data-stream + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-destination-data-stream-* + - is_true: acknowledged + +--- +"Failure redirects to correct failure store when pipeline loop is detected": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "step_1" + body: > + { + "description": "_description", + "processors": [ + { + "pipeline": { + "tag": "step-1", + "name": "step_2" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "step_2" + body: > + { + "description": "_description", + "processors": [ + { + "pipeline": { + "tag": "step-2", + "name": "step_1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "step_1" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'graph_structure_exception' } + - contains: { hits.hits.0._source.error.message: 'Cycle detected for pipeline: step_1' } + - contains: { hits.hits.0._source.error.stack_trace: 'Cycle detected for pipeline: step_1' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'step_1' } + - match: { hits.hits.0._source.error.pipeline_trace.1: 'step_2' } + - match: { hits.hits.0._source.error.pipeline: 
'step_2' } + - match: { hits.hits.0._source.error.processor_tag: 'step-2' } + - match: { hits.hits.0._source.error.processor_type: 'pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 046c049bff0d8..d7db8f4ec09dd 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1927,6 +1927,12 @@ private enum ElasticsearchExceptionHandle { ResourceAlreadyUploadedException::new, 181, TransportVersions.ADD_RESOURCE_ALREADY_UPLOADED_EXCEPTION + ), + INGEST_PIPELINE_EXCEPTION( + org.elasticsearch.ingest.IngestPipelineException.class, + org.elasticsearch.ingest.IngestPipelineException::new, + 182, + TransportVersions.INGEST_PIPELINE_EXCEPTION_ADDED ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 930adaf6258d1..e0fab5a3e1231 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -188,6 +188,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_SINGLE_VALUE_QUERY_SOURCE = def(8_718_00_0); public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0); public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); + public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java index 962e844529125..527a886905aaf 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java @@ -70,20 +70,14 @@ public IndexRequest transformFailedRequest( Supplier timeSupplier ) throws IOException { return new IndexRequest().index(targetIndexName) - .source(createSource(source, exception, targetIndexName, timeSupplier)) + .source(createSource(source, exception, timeSupplier)) .opType(DocWriteRequest.OpType.CREATE) .setWriteToFailureStore(true); } - private static XContentBuilder createSource( - IndexRequest source, - Exception exception, - String targetIndexName, - Supplier timeSupplier - ) throws IOException { + private static XContentBuilder createSource(IndexRequest source, Exception exception, Supplier timeSupplier) throws IOException { Objects.requireNonNull(source, "source must not be null"); Objects.requireNonNull(exception, "exception must not be null"); - Objects.requireNonNull(targetIndexName, "targetIndexName must not be null"); Objects.requireNonNull(timeSupplier, "timeSupplier must not be null"); Throwable unwrapped = ExceptionsHelper.unwrapCause(exception); XContentBuilder builder = JsonXContent.contentBuilder(); @@ -98,7 +92,9 @@ private static XContentBuilder createSource( if (source.routing() != null) { builder.field("routing", source.routing()); } - builder.field("index", targetIndexName); + if (source.index() != null) { + builder.field("index", source.index()); + } // Unmapped source field builder.startObject("source"); { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestPipelineException.java b/server/src/main/java/org/elasticsearch/ingest/IngestPipelineException.java new file mode 100644 index 0000000000000..a6986f18e09d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/IngestPipelineException.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchWrapperException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.ingest.CompoundProcessor.PIPELINE_ORIGIN_EXCEPTION_HEADER; + +/** + * A dedicated wrapper for exceptions encountered while executing an ingest pipeline. Unlike {@link IngestProcessorException}, this + * exception indicates an issue with the overall pipeline execution, either due to mid-process validation problem or other non-processor + * level issues with the execution. The wrapper is needed as we currently only unwrap causes for instances of + * {@link ElasticsearchWrapperException}. 
+ */ +public class IngestPipelineException extends ElasticsearchException implements ElasticsearchWrapperException { + + IngestPipelineException(final String pipeline, final Exception cause) { + super(cause); + this.addHeader(PIPELINE_ORIGIN_EXCEPTION_HEADER, List.of(pipeline)); + } + + public IngestPipelineException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index dde30377df15b..0b1a135a17214 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -944,14 +944,17 @@ private void executePipelines( // An IllegalArgumentException can be thrown when an ingest processor creates a source map that is self-referencing. // In that case, we catch and wrap the exception, so we can include more details exceptionHandler.accept( - new IllegalArgumentException( - format( - "Failed to generate the source document for ingest pipeline [%s] for document [%s/%s]", - pipelineId, - indexRequest.index(), - indexRequest.id() - ), - ex + new IngestPipelineException( + pipelineId, + new IllegalArgumentException( + format( + "Failed to generate the source document for ingest pipeline [%s] for document [%s/%s]", + pipelineId, + indexRequest.index(), + indexRequest.id() + ), + ex + ) ) ); return; // document failed! @@ -963,14 +966,18 @@ private void executePipelines( if (Objects.equals(originalIndex, newIndex) == false) { // final pipelines cannot change the target index (either directly or by way of a reroute) if (isFinalPipeline) { + logger.info("Service stack: [{}]", ingestDocument.getPipelineStack()); exceptionHandler.accept( - new IllegalStateException( - format( - "final pipeline [%s] can't change the target index (from [%s] to [%s]) for document [%s]", - pipelineId, - originalIndex, - newIndex, - indexRequest.id() + new IngestPipelineException( + pipelineId, + new IllegalStateException( + format( + "final pipeline [%s] can't change the target index (from [%s] to [%s]) for document [%s]", + pipelineId, + originalIndex, + newIndex, + indexRequest.id() + ) ) ) ); @@ -983,12 +990,15 @@ private void executePipelines( List indexCycle = new ArrayList<>(ingestDocument.getIndexHistory()); indexCycle.add(newIndex); exceptionHandler.accept( - new IllegalStateException( - format( - "index cycle detected while processing pipeline [%s] for document [%s]: %s", - pipelineId, - indexRequest.id(), - indexCycle + new IngestPipelineException( + pipelineId, + new IllegalStateException( + format( + "index cycle detected while processing pipeline [%s] for document [%s]: %s", + pipelineId, + indexRequest.id(), + indexCycle + ) ) ) ); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 7ac4215670405..58f85474f38ed 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; import org.elasticsearch.indices.recovery.RecoveryCommitTooNewException; import org.elasticsearch.ingest.GraphStructureException; +import org.elasticsearch.ingest.IngestPipelineException; import org.elasticsearch.ingest.IngestProcessorException; import 
org.elasticsearch.persistent.NotPersistentTaskNodeException; import org.elasticsearch.persistent.PersistentTaskNodeNotAssignedException; @@ -834,6 +835,7 @@ public void testIds() { ids.put(179, NotPersistentTaskNodeException.class); ids.put(180, PersistentTaskNodeNotAssignedException.class); ids.put(181, ResourceAlreadyUploadedException.class); + ids.put(182, IngestPipelineException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) {
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java index c03d5e16b287b..d33b2877d4280 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java @@ -83,7 +83,7 @@ public void testFailureStoreDocumentConversion() throws Exception { assertThat(ObjectPath.eval("document.id", convertedRequest.sourceAsMap()), is(equalTo("1"))); assertThat(ObjectPath.eval("document.routing", convertedRequest.sourceAsMap()), is(equalTo("fake_routing"))); - assertThat(ObjectPath.eval("document.index", convertedRequest.sourceAsMap()), is(equalTo(targetIndexName))); + assertThat(ObjectPath.eval("document.index", convertedRequest.sourceAsMap()), is(equalTo("original_index"))); assertThat(ObjectPath.eval("document.source.key", convertedRequest.sourceAsMap()), is(equalTo("value"))); assertThat(ObjectPath.eval("error.type", convertedRequest.sourceAsMap()), is(equalTo("exception")));
From fb189c64a39cc0173d2c9b8e8a161767d31b394f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 13 Aug 2024 15:58:35 -0400 Subject: [PATCH 015/389] ESQL: Drop an unneeded feature flag check (#111813) When checking if a type is `representable` we were excluding `DATE_NANOS` if its feature flag was disabled. But we don't have to do it - nothing checks that or needs it. --- .../xpack/esql/core/type/DataType.java | 40 ++++++------------- 1 file changed, 12 insertions(+), 28 deletions(-)
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 9b1c0e710a9d7..771c78213a061 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -394,34 +394,18 @@ public static boolean areCompatible(DataType left, DataType right) { * Supported types that can be contained in a block.
*/ public static boolean isRepresentable(DataType t) { - if (EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG.isEnabled()) { - return t != OBJECT - && t != UNSUPPORTED - && t != DATE_PERIOD - && t != TIME_DURATION - && t != BYTE - && t != SHORT - && t != FLOAT - && t != SCALED_FLOAT - && t != SOURCE - && t != HALF_FLOAT - && t != PARTIAL_AGG - && t.isCounter() == false; - } else { - return t != OBJECT - && t != UNSUPPORTED - && t != DATE_PERIOD - && t != DATE_NANOS - && t != TIME_DURATION - && t != BYTE - && t != SHORT - && t != FLOAT - && t != SCALED_FLOAT - && t != SOURCE - && t != HALF_FLOAT - && t != PARTIAL_AGG - && t.isCounter() == false; - } + return t != OBJECT + && t != UNSUPPORTED + && t != DATE_PERIOD + && t != TIME_DURATION + && t != BYTE + && t != SHORT + && t != FLOAT + && t != SCALED_FLOAT + && t != SOURCE + && t != HALF_FLOAT + && t != PARTIAL_AGG + && t.isCounter() == false; } public static boolean isSpatialPoint(DataType t) { From e5fd63bbb8aaa7f42b27ac31fc88873fb7a78cf2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 13 Aug 2024 21:55:38 +0100 Subject: [PATCH 016/389] More detail around packet captures (#111835) Clarify that it's best to analyse the captures alongside the node logs, and spell out in a bit more detail how to use packet captures and logs to pin down the cause of a `disconnected` node. --- .../discovery/fault-detection.asciidoc | 15 +++++++------- .../troubleshooting/network-timeouts.asciidoc | 20 ++++++++++--------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc index dfa49e5b0d9af..383e4c6044c67 100644 --- a/docs/reference/modules/discovery/fault-detection.asciidoc +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -168,9 +168,8 @@ reason, something other than {es} likely caused the connection to close. A common cause is a misconfigured firewall with an improper timeout or another policy that's <>. It could also be caused by general connectivity issues, such as packet loss due to faulty -hardware or network congestion. If you're an advanced user, you can get more -detailed information about network exceptions by configuring the following -loggers: +hardware or network congestion. If you're an advanced user, configure the +following loggers to get more detailed information about network exceptions: [source,yaml] ---- @@ -178,9 +177,11 @@ logger.org.elasticsearch.transport.TcpTransport: DEBUG logger.org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport: DEBUG ---- -In extreme cases, you may need to take packet captures using `tcpdump` to -determine whether messages between nodes are being dropped or rejected by some -other device on the network. +If these logs do not show enough information to diagnose the problem, obtain a +packet capture simultaneously from the nodes at both ends of an unstable +connection and analyse it alongside the {es} logs from those nodes to determine +if traffic between the nodes is being disrupted by another device on the +network. 
[discrete] ===== Diagnosing `lagging` nodes @@ -299,4 +300,4 @@ To reconstruct the output, base64-decode the data and decompress it using ---- cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress ---- -//end::troubleshooting[] \ No newline at end of file +//end::troubleshooting[] diff --git a/docs/reference/troubleshooting/network-timeouts.asciidoc b/docs/reference/troubleshooting/network-timeouts.asciidoc index ef942ac1d268d..ef666c09f87db 100644 --- a/docs/reference/troubleshooting/network-timeouts.asciidoc +++ b/docs/reference/troubleshooting/network-timeouts.asciidoc @@ -16,20 +16,22 @@ end::troubleshooting-network-timeouts-gc-vm[] tag::troubleshooting-network-timeouts-packet-capture-elections[] * Packet captures will reveal system-level and network-level faults, especially -if you capture the network traffic simultaneously at all relevant nodes. You -should be able to observe any retransmissions, packet loss, or other delays on -the connections between the nodes. +if you capture the network traffic simultaneously at all relevant nodes and +analyse it alongside the {es} logs from those nodes. You should be able to +observe any retransmissions, packet loss, or other delays on the connections +between the nodes. end::troubleshooting-network-timeouts-packet-capture-elections[] tag::troubleshooting-network-timeouts-packet-capture-fault-detection[] * Packet captures will reveal system-level and network-level faults, especially if you capture the network traffic simultaneously at the elected master and the -faulty node. The connection used for follower checks is not used for any other -traffic so it can be easily identified from the flow pattern alone, even if TLS -is in use: almost exactly every second there will be a few hundred bytes sent -each way, first the request by the master and then the response by the -follower. You should be able to observe any retransmissions, packet loss, or -other delays on such a connection. +faulty node and analyse it alongside the {es} logs from those nodes. The +connection used for follower checks is not used for any other traffic so it can +be easily identified from the flow pattern alone, even if TLS is in use: almost +exactly every second there will be a few hundred bytes sent each way, first the +request by the master and then the response by the follower. You should be able +to observe any retransmissions, packet loss, or other delays on such a +connection. 
end::troubleshooting-network-timeouts-packet-capture-fault-detection[] tag::troubleshooting-network-timeouts-threads[] From 935c0e4e2b66dbebe573ddb9d6ee3c9098763ed1 Mon Sep 17 00:00:00 2001 From: john-wagster Date: Tue, 13 Aug 2024 17:03:30 -0500 Subject: [PATCH 017/389] Explain Function Score Query (#111807) allowing for a custom explanation to be passed through as part of supporting building a plugin with a custom script score; previously threw an npe --- docs/changelog/111807.yaml | 5 ++ .../expertscript/ExpertScriptPlugin.java | 11 ++-- .../test/script_expert_scoring/20_score.yml | 66 ++++++++++++++----- .../search/function/ScriptScoreFunction.java | 24 +++++-- 4 files changed, 80 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/111807.yaml diff --git a/docs/changelog/111807.yaml b/docs/changelog/111807.yaml new file mode 100644 index 0000000000000..97c5e58461c34 --- /dev/null +++ b/docs/changelog/111807.yaml @@ -0,0 +1,5 @@ +pr: 111807 +summary: Explain Function Score Query +area: Search +type: bug +issues: [] diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 894f4ebe4bc54..dc429538fec3b 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -35,10 +35,7 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin { @Override - public ScriptEngine getScriptEngine( - Settings settings, - Collection> contexts - ) { + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { return new MyExpertScriptEngine(); } @@ -143,6 +140,9 @@ public ScoreScript newInstance(DocReader docReader) public double execute( ExplanationHolder explanation ) { + if(explanation != null) { + explanation.set("An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out."); + } return 0.0d; } }; @@ -166,6 +166,9 @@ public void setDocument(int docid) { } @Override public double execute(ExplanationHolder explanation) { + if(explanation != null) { + explanation.set("An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out."); + } if (postings.docID() != currentDocid) { /* * advance moved past the current doc, so this diff --git a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml index 89194d162872d..8f0b670ef03e3 100644 --- a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml +++ b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml @@ -4,26 +4,27 @@ setup: - do: indices.create: - index: test + index: test - do: index: - index: test - id: "1" - body: { "important_field": "foo" } + index: test + id: "1" + body: { "important_field": "foo" } - do: - index: - index: test - id: "2" - body: { "important_field": "foo foo foo" } + index: + index: test + id: "2" + body: { "important_field": "foo foo foo" } 
- do: - index: - index: test - id: "3" - body: { "important_field": "foo foo" } + index: + index: test + id: "3" + body: { "important_field": "foo foo" } - do: - indices.refresh: {} + indices.refresh: { } + --- "document scoring": - do: @@ -46,6 +47,39 @@ setup: term: "foo" - length: { hits.hits: 3 } - - match: {hits.hits.0._id: "2" } - - match: {hits.hits.1._id: "3" } - - match: {hits.hits.2._id: "1" } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + +--- +"document scoring with custom explanation": + + - requires: + cluster_features: [ "gte_v8.16.0" ] + reason: "bug fixed where explanations were throwing npe prior to 8.16" + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + explain: true + query: + function_score: + query: + match: + important_field: "foo" + functions: + - script_score: + script: + source: "pure_df" + lang: "expert_scripts" + params: + field: "important_field" + term: "foo" + + - length: { hits.hits: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.0._explanation.details.1.details.0.description: "An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out." } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 58cbfa2be3f05..6b8a75337b8ee 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -62,18 +62,24 @@ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOEx leafScript._setIndexName(indexName); leafScript._setShard(shardId); return new LeafScoreFunction() { - @Override - public double score(int docId, float subQueryScore) throws IOException { + + private double score(int docId, float subQueryScore, ScoreScript.ExplanationHolder holder) throws IOException { leafScript.setDocument(docId); scorer.docid = docId; scorer.score = subQueryScore; - double result = leafScript.execute(null); + double result = leafScript.execute(holder); + if (result < 0f) { throw new IllegalArgumentException("script score function must not produce negative scores, but got: [" + result + "]"); } return result; } + @Override + public double score(int docId, float subQueryScore) throws IOException { + return score(docId, subQueryScore, null); + } + @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { Explanation exp; @@ -83,11 +89,17 @@ public Explanation explainScore(int docId, Explanation subQueryScore) throws IOE scorer.score = subQueryScore.getValue().floatValue(); exp = ((ExplainableScoreScript) leafScript).explain(subQueryScore); } else { - double score = score(docId, subQueryScore.getValue().floatValue()); + ScoreScript.ExplanationHolder holder = new ScoreScript.ExplanationHolder(); + double score = score(docId, subQueryScore.getValue().floatValue(), holder); // info about params already included in sScript - String explanation = "script score function, computed with script:\"" + sScript + "\""; Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore); - return Explanation.match((float) score, explanation, scoreExp); + Explanation 
customExplanation = holder.get(score, null); + if (customExplanation != null) { + return Explanation.match((float) score, customExplanation.getDescription(), scoreExp); + } else { + String explanation = "script score function, computed with script:\"" + sScript + "\""; + return Explanation.match((float) score, explanation, scoreExp); + } } return exp; }
From 17339198d8ef563661f6189853130ea968cb76fe Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 14 Aug 2024 12:23:19 +0700 Subject: [PATCH 018/389] Allow legacy_* index.codec options to be configured. (#111867) This is an escape hatch for cases where zstd unexpectedly performs worse. --- .../index/engine/EngineConfig.java | 9 ++++-- .../index/codec/LegacyCodecTests.java | 29 +++++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java
diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index f82ac04207604..079d6479a63e4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecProvider; +import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.ShardId; @@ -97,9 +98,11 @@ public Supplier retentionLeasesSupplier() { */ public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { switch (s) { - case "default": - case "best_compression": - case "lucene_default": + case CodecService.DEFAULT_CODEC: + case CodecService.LEGACY_DEFAULT_CODEC: + case CodecService.BEST_COMPRESSION_CODEC: + case CodecService.LEGACY_BEST_COMPRESSION_CODEC: + case CodecService.LUCENE_DEFAULT_CODEC: return s; default: if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones
diff --git a/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java new file mode 100644 index 0000000000000..dbe83af1a0cfb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.index.codec; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class LegacyCodecTests extends ESSingleNodeTestCase { + + public void testCanConfigureLegacySettings() { + assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); + + createIndex("index1", Settings.builder().put("index.codec", "legacy_default").build()); + var codec = client().admin().indices().prepareGetSettings("index1").execute().actionGet().getSetting("index1", "index.codec"); + assertThat(codec, equalTo("legacy_default")); + + createIndex("index2", Settings.builder().put("index.codec", "legacy_best_compression").build()); + codec = client().admin().indices().prepareGetSettings("index2").execute().actionGet().getSetting("index2", "index.codec"); + assertThat(codec, equalTo("legacy_best_compression")); + } +} From b32c66fde3f870edbbbd0bad830fc97ce9b04081 Mon Sep 17 00:00:00 2001 From: Francois-Clement Brossard Date: Wed, 14 Aug 2024 16:20:48 +0900 Subject: [PATCH 019/389] Fix Start Trial API output acknowledgement header for features (#111740) * Fix Start Trial API output acknowledgement header for features * Update docs/changelog/111740.yaml --------- Co-authored-by: Elastic Machine --- docs/changelog/111740.yaml | 6 ++++++ .../org/elasticsearch/license/StartTrialClusterTask.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/111740.yaml diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml new file mode 100644 index 0000000000000..48b7ee200e45e --- /dev/null +++ b/docs/changelog/111740.yaml @@ -0,0 +1,6 @@ +pr: 111740 +summary: Fix Start Trial API output acknowledgement header for features +area: License +type: bug +issues: + - 111739 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 67731b03d3e65..22f4de105cb2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -24,7 +24,7 @@ public class StartTrialClusterTask implements ClusterStateTaskListener { - private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " + private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all subscription features. " + "By starting this trial, you agree that it is subject to the terms and conditions at" + " https://www.elastic.co/legal/trial_license/. 
To begin your free trial, call /start_trial again and specify " + "the \"acknowledge=true\" parameter."; From 864bcd0d91f5bd9cfcb63fc9a0bb6495a59b76c4 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 14 Aug 2024 12:06:01 +0200 Subject: [PATCH 020/389] ESQL: Fix mutateInstance in EsIndexSerializationTests (#111873) --- .../xpack/esql/index/EsIndexSerializationTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index 1e5a6261d055a..1ac61a2adf68e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -58,8 +58,8 @@ protected EsIndex createTestInstance() { @Override protected EsIndex mutateInstance(EsIndex instance) throws IOException { String name = instance.name(); - Map mapping = randomMapping(); - Set concreteIndices = randomConcreteIndices(); + Map mapping = instance.mapping(); + Set concreteIndices = instance.concreteIndices(); switch (between(0, 2)) { case 0 -> name = randomValueOtherThan(name, () -> randomAlphaOfLength(5)); case 1 -> mapping = randomValueOtherThan(mapping, EsIndexSerializationTests::randomMapping); From 451640014351160325b4a16b295219dbd496e593 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Wed, 14 Aug 2024 08:08:58 -0400 Subject: [PATCH 021/389] More XContent long coercion cases (#111641) * More XContent long coercion cases * spotless --- .../xcontent/XContentParserTests.java | 65 ++++++++++++++----- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java index b9cb7df84a8e4..58cb0af79e103 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java @@ -80,39 +80,68 @@ public void testLongCoercion() throws IOException { try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { builder.startObject(); - builder.field("decimal", "5.5"); - builder.field("expInRange", "5e18"); + + builder.field("five", "5.5"); + builder.field("minusFive", "-5.5"); + + builder.field("minNegative", "-9.2233720368547758089999e18"); + builder.field("tooNegative", "-9.223372036854775809e18"); + builder.field("maxPositive", "9.2233720368547758079999e18"); + builder.field("tooPositive", "9.223372036854775808e18"); + builder.field("expTooBig", "2e100"); + builder.field("minusExpTooBig", "-2e100"); + builder.field("maxPositiveExp", "1e2147483647"); + builder.field("tooPositiveExp", "1e2147483648"); + builder.field("expTooSmall", "2e-100"); + builder.field("minusExpTooSmall", "-2e-100"); + builder.field("maxNegativeExp", "1e-2147483647"); + + builder.field("tooNegativeExp", "1e-2147483648"); + builder.endObject(); try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { assertThat(parser.nextToken(), is(XContentParser.Token.START_OBJECT)); - assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); - assertThat(parser.currentName(), is("decimal")); - assertThat(parser.nextToken(), 
is(XContentParser.Token.VALUE_STRING)); - assertThat(parser.longValue(), equalTo(5L)); + assertFieldWithValue("five", 5L, parser); + assertFieldWithValue("minusFive", -5L, parser); // Rounds toward zero - assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); - assertThat(parser.currentName(), is("expInRange")); - assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); - assertThat(parser.longValue(), equalTo((long) 5e18)); + assertFieldWithValue("minNegative", Long.MIN_VALUE, parser); + assertFieldWithInvalidLongValue("tooNegative", parser); + assertFieldWithValue("maxPositive", Long.MAX_VALUE, parser); + assertFieldWithInvalidLongValue("tooPositive", parser); - assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); - assertThat(parser.currentName(), is("expTooBig")); - assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); - expectThrows(IllegalArgumentException.class, parser::longValue); + assertFieldWithInvalidLongValue("expTooBig", parser); + assertFieldWithInvalidLongValue("minusExpTooBig", parser); + assertFieldWithInvalidLongValue("maxPositiveExp", parser); + assertFieldWithInvalidLongValue("tooPositiveExp", parser); // too small goes to zero - assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); - assertThat(parser.currentName(), is("expTooSmall")); - assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); - assertThat(parser.longValue(), equalTo(0L)); + assertFieldWithValue("expTooSmall", 0L, parser); + assertFieldWithValue("minusExpTooSmall", 0L, parser); + assertFieldWithValue("maxNegativeExp", 0L, parser); + + assertFieldWithInvalidLongValue("tooNegativeExp", parser); } } } + private static void assertFieldWithValue(String fieldName, long fieldValue, XContentParser parser) throws IOException { + assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), is(fieldName)); + assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); + assertThat(parser.longValue(), equalTo(fieldValue)); + } + + private static void assertFieldWithInvalidLongValue(String fieldName, XContentParser parser) throws IOException { + assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), is(fieldName)); + assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); + expectThrows(IllegalArgumentException.class, parser::longValue); + } + public void testReadList() throws IOException { assertThat(readList("{\"foo\": [\"bar\"]}"), contains("bar")); assertThat(readList("{\"foo\": [\"bar\",\"baz\"]}"), contains("bar", "baz")); From a63a2f7b008dde99dc2219220288f348616c483a Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 14 Aug 2024 13:32:13 +0100 Subject: [PATCH 022/389] Remove `CompletableFuture` from `Node#prepareForClose` (#111846) More of a style thing than anything else given that there's no risk of catching an `Error` here, but still generally there are alternatives to `CompletableFuture` which we prefer. 
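To illustrate the preferred pattern, here is a minimal sketch (the `awaitAll` helper and its name are illustrative, not part of this change) of the `RefCountingListener`/`PlainActionFuture` combination that the diff below adopts in place of `CompletableFuture.allOf`:

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.RefCountingListener;

import java.util.Map;
import java.util.concurrent.TimeUnit;

class ShutdownSketch {
    // Run each named task on its own thread and block until all of them finish
    // or the timeout elapses. `done` completes once every acquired ref is released.
    static void awaitAll(Map<String, Runnable> tasks, long timeoutMillis) throws Exception {
        final PlainActionFuture<Void> done = new PlainActionFuture<>();
        try (RefCountingListener refs = new RefCountingListener(done)) {
            for (Map.Entry<String, Runnable> task : tasks.entrySet()) {
                final var ref = refs.acquire(); // one ref per task
                new Thread(() -> {
                    try {
                        task.getValue().run();
                    } finally {
                        ref.onResponse(null); // refs are never completed exceptionally
                    }
                }, task.getKey()).start();
            }
        } // closing the RefCountingListener releases the initial ref
        done.get(timeoutMillis, TimeUnit.MILLISECONDS); // TimeoutException if tasks remain
    }
}

Because the listeners are only ever completed successfully, there is no exceptional path to reason about, which is what makes the `CompletableFuture` machinery unnecessary here.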
--- .../java/org/elasticsearch/node/Node.java | 86 +++++++++++-------- 1 file changed, 51 insertions(+), 35 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index df0ad3009abda..3302114b078a8 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -13,6 +13,9 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.client.internal.Client; @@ -40,7 +43,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; @@ -101,11 +103,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; import javax.net.ssl.SNIHostName; @@ -591,52 +596,63 @@ public synchronized void close() throws IOException { * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down * gracefully from the underlying operating system, before system resources are closed. This method will block * until the node is ready to shut down. - * + *
+ * <p>
* Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. */ public void prepareForClose() { - HttpServerTransport httpServerTransport = injector.getInstance(HttpServerTransport.class); - Map stoppers = new HashMap<>(); - TimeValue maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); - stoppers.put("http-server-transport-stop", httpServerTransport::close); - stoppers.put("async-search-stop", () -> this.awaitSearchTasksComplete(maxTimeout)); - if (terminationHandler != null) { - stoppers.put("termination-handler-stop", terminationHandler::handleTermination); + final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); + + record Stopper(String name, SubscribableListener listener) { + boolean isIncomplete() { + return listener().isDone() == false; + } } - Map> futures = new HashMap<>(stoppers.size()); - for (var stopperEntry : stoppers.entrySet()) { - var future = new CompletableFuture(); - new Thread(() -> { - try { - stopperEntry.getValue().run(); - } catch (Exception ex) { - logger.warn("unexpected exception in shutdown task [" + stopperEntry.getKey() + "]", ex); - } finally { - future.complete(null); - } - }, stopperEntry.getKey()).start(); - futures.put(stopperEntry.getKey(), future); + final var stoppers = new ArrayList(); + final var allStoppersFuture = new PlainActionFuture(); + try (var listeners = new RefCountingListener(allStoppersFuture)) { + final BiConsumer stopperRunner = (name, action) -> { + final var stopper = new Stopper(name, new SubscribableListener<>()); + stoppers.add(stopper); + stopper.listener().addListener(listeners.acquire()); + new Thread(() -> { + try { + action.run(); + } catch (Exception ex) { + logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); + } finally { + stopper.listener().onResponse(null); + } + }, stopper.name()).start(); + }; + + stopperRunner.accept("http-server-transport-stop", injector.getInstance(HttpServerTransport.class)::close); + stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout)); + if (terminationHandler != null) { + stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); + } } - @SuppressWarnings(value = "rawtypes") // Can't make an array of parameterized types, but it complains if you leave the type out - CompletableFuture allStoppers = CompletableFuture.allOf(futures.values().toArray(new CompletableFuture[stoppers.size()])); + final Supplier incompleteStoppersDescriber = () -> stoppers.stream() + .filter(Stopper::isIncomplete) + .map(Stopper::name) + .collect(Collectors.joining(", ", "[", "]")); try { if (TimeValue.ZERO.equals(maxTimeout)) { - FutureUtils.get(allStoppers); + allStoppersFuture.get(); } else { - FutureUtils.get(allStoppers, maxTimeout.millis(), TimeUnit.MILLISECONDS); + allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); } - - } catch (ElasticsearchTimeoutException t) { - var unfinishedTasks = futures.entrySet() - .stream() - .filter(entry -> entry.getValue().isDone() == false) - .map(Map.Entry::getKey) - .toList(); - logger.warn("timed out while waiting for graceful shutdown tasks: " + unfinishedTasks); + } catch (ExecutionException e) { + assert false : e; // listeners are never completed exceptionally + logger.warn("failed during graceful shutdown tasks", e); + } catch (InterruptedException e) { + 
Thread.currentThread().interrupt(); + logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); + } catch (TimeoutException e) { + logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); } }
From dd91242e78022479a50ef6d8ab6ed07f9d2d01ed Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Wed, 14 Aug 2024 15:02:46 +0200 Subject: [PATCH 023/389] ESQL: Have BUCKET generate friendlier intervals (#111879) Currently, when specifying a range for BUCKET to generate an interval, the upper bound is not considered as part of the range to cover. This changes that, so that the resulting interval more closely matches the formula: `(to - from)/buckets` For example, `BUCKET(ts, 4, "2024-07-16T08:00:00Z", "2024-07-16T12:00:00Z")` now picks an hourly interval, since (12:00 - 08:00) / 4 buckets = 1 hour per bucket. Resolves #110916. --- docs/changelog/111879.yaml | 6 + .../expression/function/grouping/Bucket.java | 2 +- .../function/grouping/BucketTests.java | 14 ++ .../test/esql/26_aggs_bucket.yml | 179 ++++++++++++++++++ 4 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/111879.yaml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml new file mode 100644 index 0000000000000..b8c2111e1d286 --- /dev/null +++ b/docs/changelog/111879.yaml @@ -0,0 +1,6 @@ +pr: 111879 +summary: "ESQL: Have BUCKET generate friendlier intervals" +area: ES|QL +type: enhancement +issues: + - 110916
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 8547e5c6f5730..712eee8672bf3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -288,7 +288,7 @@ boolean roundingIsOk(Rounding rounding) { while (used < buckets) { bucket = r.nextRoundingValue(bucket); used++; - if (bucket > to) { + if (bucket >= to) { return true; } }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java index 64498f1b5a4fe..4c7b812111450 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java @@ -92,6 +92,20 @@ private static void dateCases(List suppliers, String name, Lon dateResultsMatcher(args) ); })); + // same as above, but a low bucket count and datetime bounds that match it (at hour span) + suppliers.add(new TestCaseSupplier(name, List.of(DataType.DATETIME, DataType.INTEGER, fromType, toType), () -> { + List args = new ArrayList<>(); + args.add(new TestCaseSupplier.TypedData(date.getAsLong(), DataType.DATETIME, "field")); + args.add(new TestCaseSupplier.TypedData(4, DataType.INTEGER, "buckets").forceLiteral()); + args.add(dateBound("from", fromType, "2023-02-17T09:00:00Z")); + args.add(dateBound("to", toType, "2023-02-17T12:00:00Z")); + return new TestCaseSupplier.TestCase( + args, + "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding[3600000 in Z][fixed]]", + DataType.DATETIME, +
equalTo(Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).build().prepareForUnknown().round(date.getAsLong())) + ); + })); } } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml new file mode 100644 index 0000000000000..d18b6261fc1d7 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml @@ -0,0 +1,179 @@ +--- +"friendlier BUCKET interval hourly: #110916": + - requires: + cluster_features: ["gte_v8.14.0"] + reason: "BUCKET extended in 8.14.0" + test_runner_features: allowed_warnings_regex + - do: + indices.create: + index: test_bucket + body: + mappings: + properties: + ts : + type : date + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16T08:10:00Z" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16T09:20:00Z" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16T10:30:00Z" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16T11:40:00Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 4, "2024-07-16T08:00:00Z", "2024-07-16T12:00:00Z") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-07-16T08:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-16T09:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-07-16T10:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-07-16T11:00:00.000Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 4, "2024-07-16T08:00:00Z", "2024-07-16T12:00:00.001Z") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 2 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-07-16T06:00:00.000Z" } + - match: { values.1.0: 3 } + - match: { values.1.1: "2024-07-16T09:00:00.000Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 4, "2024-07-16T08:09:00Z", "2024-07-16T12:00:00Z") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-07-16T08:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-16T09:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-07-16T10:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-07-16T11:00:00.000Z" } + +--- +"friendlier BUCKET interval: monthly #110916": + - requires: + cluster_features: ["gte_v8.14.0"] + reason: "BUCKET extended in 8.14.0" + test_runner_features: allowed_warnings_regex + - do: + indices.create: + index: test_bucket + body: + mappings: + properties: + ts : + type : date + + - do: + bulk: + refresh: true + body: + - { "index": { 
"_index": "test_bucket" } } + - { "ts": "2024-06-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-08-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-09-16" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 11, "2024-01-01", "2025-01-01") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 1 } + - match: { values.0.0: 4 } + - match: { values.0.1: "2024-01-01T00:00:00.000Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 12, "2024-01-01", "2025-01-01") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-06-01T00:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-01T00:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-08-01T00:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-09-01T00:00:00.000Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 12, "2024-01-01", "2025-01-01T00:00:00.001") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 1 } + - match: { values.0.0: 4 } + - match: { values.0.1: "2024-01-01T00:00:00.000Z" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, 13, "2024-01-01T12:13:14Z", "2025-01-01") | SORT b' + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-06-01T00:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-01T00:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-08-01T00:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-09-01T00:00:00.000Z" } From 7f67ba995865e624168a17757b1c9bcbc6832f07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 14 Aug 2024 16:04:11 +0200 Subject: [PATCH 024/389] [DOCS] Expands inference API main page info (#111830) --- .../inference/images/inference-landscape.png | Bin 0 -> 96237 bytes .../inference/inference-apis.asciidoc | 26 ++++++++++++++---- 2 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 docs/reference/inference/images/inference-landscape.png diff --git a/docs/reference/inference/images/inference-landscape.png b/docs/reference/inference/images/inference-landscape.png new file mode 100644 index 0000000000000000000000000000000000000000..a35d1370fd09bf27854b389ddf190c87f015ee84 GIT binary patch literal 96237 zcmeGEWl&u~*Di`;!GgQHgrEy|2<{SsyF+kYIKge&;{FOxm}Qrz(&&V#cl6Nt 
zt2yh0&i3vl=(J|O9qE1z=&(LXn6c1Lw@)DohZKh)22HCnC;Rr7ftOjrD)8u&@5FBD zah6)ceWmiLZ|NQIJ+PypW3(_8G5p8Bny3sSuh`e9&zR7d82@T)h%!;CBLBRF5ip4v zhlkBSS1Jo9A^MM2Fi?#Ce>3UujT&n zpXDU~?{fZkIU!;6|44M|<&_;>ay`Dj4f&Ti{5xQbpb!^M@1Q^JhC~sVF%KFy2|T=i z%ne2Uzi~@)nje7h`D0?e#JHX*6Sm+lKb-ioH(82CBM7Lu_7J?I-2eHYhboM4*ZS$@ zU8_(~@eqnmJ3-hgCgvWB3m^LkIX&2nZ?}0JX|jVB`HO zs02`>Z7MYWnMF}BVS*Bs8UE^k?>~bnkX6KYBFhN>`BFjBM%76X zDmH*1X)Ug(f95LDxb6*t0hNyWv<9W1asR z?ElZ#{_7UAgGUztL)r^wEf-aiD!!-UFyHO=XHEnM%F|;t)zjS)18eMd1Z-m1B)N0oe z0I&k}+he?wdMK)# z+iq;&e9p9g&LDb4b@M9>%q9iHzv}CAZ#xuS&IjR91NKVdzYmeqY&b7c{j!|`+!cTF zHskcSo0_@SG5=*zZY{UshH9rG$d5QBA0fms`)EfnK7)&H3x1q>yvk%QeivU$kc z4~y<~$5alw&s0h3=vA$gdTDQ5s}}GFRb`j6D>t)q50iQxpN3>81z*+-_xd(Bxr=BT zHr?F9d~7-lGTY`K=9iDpcO4*Bty?{JTpBDCsC6biOpp1?>Fr!9#4tdHFTx&eeztZ= z-T5; zg;fGJb+wJoxexgl%{~B|W~(RUWa^77`PgeuT0CQ92gZX7Cf|LuCDUEn_4YcPRvkb+ znVoYM{tD$nECExNvC5qArV>fakyAi_6m}ZIp#R^xxO2d_y?}%Ve*&=DY=P$wXZ1^9 z;|0I@Sgc5vzOEI)=K=<$Gfdb8C|uhA0UoGxAp3Mo|A^c>IPJUg_M;A!BLTmKzCgi= zMcLt)U1l-QAVEKQKS@wHd*2frLZ@zUc*6c4C-X>$fLTckuk|gP62& zJodfln045c8%eg}>B&uM>SLw)#^Moe<;-6~(3kleJ|(YOga0$mlqo2f_mCW=-Zc9+ zc->aoyfZT~piDVPJ+Vn7)ku(J(H^OyUu;AI$KxoZHCeo>G#?x^yrx)Fx^TGtFRLRc z#2+%Z!#dg;TaiGK%nAm=rjxR+1SM?L#bGfoYMW?*>YBUi{%*-U+;g2zv(=s-vuiFR zDLAfJJH}Q17x=Z&0@(w3+Pn6>b+_uiZ6gRh-+9PHtA5*}c-UG%2@^~ReRU(Ppcg$! zyNV(UtiaH)*kqqiz#RvjjYk}IYQP(I^+a{q)KPH5V%MS1FFQ}*7gZK#{cYyq6MOuQIGS4KxYjInv9pxkh4lV&0N_Z_lOu5@Gr5s`lcxn ztig(~sZ%@>Kx+hCn3*Q6ak{Hi^4BhDs_DTavsadNL1{>uux~m&kv(rjwI@m!SgUbN zud|9AJ)9;WSvdPS4Icny8n=v`uOQv_{zAw~nEIzBhhIbNokH)UI>Y}8bhdsZluA4f zFJ?9PGToo8$vIc{s~eSNx3LpIu>dA`#1PrF{-U; zUc|qK(e{r7mN`le|K93Va$Cuiq1H)S7Y&n;nxCwAmu^XjvOE-HTJLa78lduz%tW%h zL2X?f6dN#JZ4RI;uk!qV;EuPDDAt;I=ph8Lx_hi{Lu(VKd-7h25<>? z8HIKq_dfDiFJW4RW|N3p6o=~cEsm_(m@cm#qrWu72OF?rA_Aa$A8U#+@a*Yj35j0@ zI8uw6tE(}1X#UQ+0iPof=4UYpe9ye8<0(sw362=^o@}{Y2;l#dPWz~B7Xk_k`1PoL z368d6zx@YJ^)h!*e{S^OE@|1EV0C5E*_7*zUdNnP2qa_*&Ujv@9jMm1E!aIDbrfhA(|e?g!`C7#&_OEwSYI zdEdb-INCh)M9=bjs}R?i(h5MVAUX{nzQ9Zd1x4IYJp0|k&4_@=^kR+epASP{8 z2&7c-&IYQ#tRG$y*Wo9-gcJiOwP90>#U)fmbxn=H5UikFLttif*Cval!Z(ReWn?$4 zXY~&}K6}D`3yC?KS{>t(@W)?rQ_=Xbxs3X;t?aK(7fw11&a(ODZ}$+4SC|k~pRKI# z{4Zr$(d76%$5RbmXp7J+Or>6a&m*B>!YDTR>hgG$ReIwyxp%xeX_-vNrI@N1*z4Ux z%7S61!J`jo8WMkFA{xTN2=5TfeN2M7Ikh0G$pzhodKjQ(x+fC5TsY*g?_^3XfCsmf>zV5@QUdbVr zi2+VY3^^X0R=Q#R%G~0afbR-6v4ifr#)46LzN`Dvo$Y;=@TU2#^QD~S*T~>B7h?TpS@&8Wi`50)5QaR3dj9D;5=q)L;spRqq!AQ53TzA| z+z0={-iA6v6e#E}W?mF;7=yA^{l*kJ!)-fp+z!t^;9+|zv4m&!cnAkwtthiIA`Nudwa`7b*DrJMMQ~@?d5|Z0=7^ zKKDMyix<{aqvnPUa|6CasiqwgMmHF|z@;F(2>%`gk&h4qafR<$qjL-yojRwVo0K%& zfacHc_i=e=rtOAHlu*WNwg|r}`ZixanI&NxS;!#Re`m7@a%*~-O%A2va7(H4D0MN3 zk5_lpy69Q?*)rvS7}!+)SJQob^c549{6tIDnJ^S7f+)0ai#=vd%A{hD&9)pJrH{x^ zv4LFqw>3w0Vi7+Q@)yzcsVFJSHyk+lc@`LX(>O3e7L{fL+x$!h1d_||wHO|7bl=B(?6LTuLy;|$KtWWIJ z&}HLa;BqoVkF=iJJ6cB!)pj%aXC%Q>MZ$l3Nz{$701bViILfRwOGS z1&%iKXeO2kQ$bZeLLk)m9smGW>bSGRcOgpu6XoMA{>1ZVMjujADX3@4Gh9U29h!2a z>eaQTnILbYJKmvs?URm(gDt@gJgsRW` zJkUx=FGm-Vx61!hWwg3m$WB&10W4uyt#0RhLDNq3xSfgg@^5sP1|Fg|V%|N%%JzZ%5+l zMX8+%Zi(VO+H@pR+n*@`2rLAxgMQVC7{#2AzYUzl;td?{ zd^EqrV;dHzOsnZru~EPggod*RUkwN_2!hw9PZ$|**$JqBMeE6nyAz=OX(@}_r(Qbswbfx!t?I?@e?ay4}+e9e=sItGufjHmzf?OC2+8v^vY5obs zYf@y%(qk`?a0YKvaKPg;x#bU#EbeDFE%2~R7!$WI;*Fe4!LF??qee{;x@60+iBnE2 zZBHJ$V0y^Gl=B;FCgdFcA=__r?FqWpS4(XBHOB4I$?^G!VH9!7Uh6kNTBvsYc~yxg z5?ubMl!7vqAT`vh9Zpe}@r6>fk~ZsvAXIz%WWgsn`r7+*)QjPk?Qh)KVYo|!xjRJ1kH^wXDX)l;$3pO3E!?6c-Nw9FjcdX;|= zGLu`cK0O8iYOXEDlku)ExF};-ZS4D%sO&$K;pJU!4Ct85Yw%!q4KV5>AuTWptyIQ8 zMx@uXP&1ppX3Y2M8KWxRHXVIO&*gQ!)^O{22)yYTv*MckqpPF)_OMHT-tk5@Qq@;@ 
zf)u+O^@y~FfN<}j4gdaV7=|vlLq{~PaxPLm^ty^xHM2W}YjS2%Sn|ulO!#i=ASzz` z2d6k(6k#SN9`0_3<6PZl-I@Tf7mvk_4kew*!Y{YtE+uyPvZsTr++&Z~J%0cMkNy-t zGwezyBJqQED&HhOcJu70!*%U`%+j7ca7_0gscRZNe&(K%mV$XuXT_t}kavF|wYj(- zK!hSZVZht>{bfXwalY$D$CW~JsNA|PLs$1}B3ZmpTFcfplX=bsF8;};1Nj?`hrX}h z`^WLFT$NtzBUJA06(eQtoO7RNT(0tUrR>dik8xmLQWAFOVO_+s$>SmhcN#6GeaFBI ziPCXd-HT674NQGn1LM4ugv2DQ&9P0UZYGApD2t(tyzH?J(e+Nw%Z>y2eEz;bse`rh zr%fcoTjqdM`t!#f2gnHQYvWnlt&VfRy#nB-BzX|k%@Gas-$r{!^2Kvmg~r4qzdb>E zcme95ig7WxKsW;JOQbtm#_Xx7w;6gL-7ap0ZUcjJB4lS2F>nPDq?{saPyG7z6x7pw zs@2Elaa#`%L)=K}|Gcq<8!U8{C13~nn76$p+vtPFFr7pBqc@d}ZWI;8JjTF+bEI~D z30l{ad|S~bqRVqrXV=ezg)wfcxn`DymY1wDq&4tG5ygLsmqJ@B&no{GB=x??KB%Ng z)ifR1>zB1IElf)stfv(pTyE3t#E$YrZHbEMOGru0{+)AmbyFbVF_zoJ37b*XSMx1< z)95=5y|}n2zX2m{Uk$+t_F$+~YU-Djlv#hbmy=T<%H+Q$RUOg@6ptee2|Me0+RuZB2X# zO^G=tg^xRt2!K{vURhpQv3K2m;`Cvge$WKNq^WOnXA#?}#wlB0zn%kp_&JA@mWQ#) zkUHo0?-BM3KB52mMb68#1|OLGf_41@N7p zyEMi{rCMpT6xXqwt<`g}2 zR}*$QxcneS5QuAC}W`L7~(o&7XQ2KSz zDj84IEO7aZs`+KB&v3^g)`dSy*WIC{tV+DUWyZZJug352<+nm#92=stntxkmTM@#e zg@OXu#>T*q%^SKE1-p@z$6tG1WqEaG*OB+S_Ik~K8<+-Uzh8@IX;OR2= zS9?f_oL&VjTEyeU$j1jJBWdV(u@m@Q!!e|zlbLROH5{n@alWfwn2x`&tn7Hg@O@FD zM>Xr5Rpbe`Za2PS04jY^Q@i9`xE{!TWtFu)uUC<&s-ofU zn0R=N?-!jxL+I^~Bf8~T94>7-+5x{o>wA`ARZV_wyGzOH@*3tbW6e(2@I)9P?x09%nLv_Pvf4uSd6NMTs0w7DIy(*mO$Sg2_j0 zea88tuJy$~5d$I+1$=+!=Xs|amYZvRP|P`M*FWd;1#|eO^iAJPOQU)Gne7F;^oNc4 zjo65tOQun*3qJqy{rTf3e5Gy!3p>@;Li%?zG-MSxUF&FYL2}4Thcp}-Q)#~LD=*&}>-(x;K zJCmL?^A!%6QCE+~TIWp_7?kNcx27)vH{ zLObAXRZlRTPP(+e)-V=S+UVG#CINGqO09Hxs1DZC>=4NBUp%VKTxya69%K$;>PCP+vg?gV9Iz+do zp?@5M=I=%u)!%se7;|xPjw)VW>R^==6+PTm)d9R%fX+f3*B1<974^~U%{p{h^LTXA zBT$g0L9tvwxD1iS^b_iO9zADCHWUMeyGvFX*5w!_q?NI0n(bp-Dsggh8(m-twgBh4 zIP9)m=^}p&)UL9LT|Umt4DOj(U;}_}Og`8qeR`FV8Ic_L<2bkq2J8+SRs(+NpD+>{a@X604)_ZS_#i z!m)EWV)*^u|Dj7MSA?s?D8ch<$#98?-~CY9rcsg66;jR#;u-|37HprWCZYzy0#2DT zkQ`(mtyZW^FERq>D8rheek`)>mDHusM zg7IrI{31Md{S8x$J{$Y>!|&RfL+;N=a+mmax|@97rZUMTrKH^5ftw+mES4l*7~_hNyxFu}>mrLez>4Ch1u0AN zqy28?h8CXdf%SRdGlBNl&~(_ux?hVTUi(8H`o=08UZDonRdEjA-|clDC9B%z@iMX; ziekN+e0DEA?Oak?3djEbKrukJ10lmGrTWeZ(kmU)$9gvTtY*d$#57ohPh`;`mt+2C zgB*Lg5~5W5C#)H|R0$3~Rn(&ZfRfISAz-o0V~-*}00 zvXPo}0$-(eqcfAiPl!-GB85IVKe`gMAJPs0>4*1doR;Staux>7U+>16*4bb|u~Foi z8Nv>Gg5d8NI+;EOhR4}>Br#x7HvMe<@;(mFUw9lA!P<18f`;~QTNe?1@_VcT?oXe% z*!@kt<|M#Gxv4)1h=XWdR~Z9nfxTNBB9dmPF33aWS5B*lXh|ul0&9YXCM%Aex6!x4 z7Bm6)gCB||IZGVM$TXM2A4wSDvT*EIZ)U$eZ()HW-n(=4%tK<+6o;*27N$rU3R$&d zhYmdGE<=~yct0l_woN;>t)2Mo2=vJW15C)o*r+9>>Jq7?$@u-kF##;zB$eSeJ1*w? 
zhylTA1o-$9y0&wmPP?STF6Uc0U-4{hI_esSbcW zmUO+qTfLv^NscXN?iGh?e`Gl2t!z6LX$*Qlt77g-NJ|+TTHr05vzDTcrD+I~riWI+ zI3E>UmF)u0*P>&id4Pje$mnv7(s>wt+Vr%`5hn0OnLIw&bUI{mC5?mkWBXEQQRyz( z1vQTRK?%*CTsN91oo3iO`dJfI?X%m3xVC)tGCNKk_UN z`$MT!IEK+#ci>4o?fAHo?f@<8mzl3KyW>&gZu` z8Mz3`iRqNwtUXP(MovNPy=GX_lqP3q!qu6JL^i99-Fnm8mjvHqW@L(lthWmCBP5v) z@4YXKjmfw1Vf?W;(yS@t4KcK>^0aUJX!|9kx0ght^ka(HPlhmuRkY8D{nGXb!Aj>7 z?smN9pxzRS*dHz1q_+*7wJrv>W=!lld5%>>BEsvXSI_PPo3*Pe)4?3snD6V^I~AX&gssFtcqWtuqy7G527>#DO<65Mm~Y#WTJtLYaFV}oaJUV_t5Hy;43!;# z$_~LT@1?)dlL+BL%t@32gG1AGzI}D-*xKE#`kBD?^{ine+D?AT< zgX-cio=Nw|Ny(vBk&Q9CaiL}oKd_?@l+Kzt0!)I@NzAaWXgm zsMz5J;L~)onNhm+VCi1l84l2{*NEq&8h%`5^XVBpzQB~-2uME-7wr4O$I$-0dFt-& z$>sXw*HLH~HIy0y%&rLvjHiW0Gv7s1QQJ_K$t7hEdwMUx#nf3H*fxHAIVcfpOrGzY z)8!h2h}N%coDdqQ=9PCdoQi(`J2^^&DWeWUgd))O?Rotj)*jKJ%9~!8!qJE#C=BLW z3Yjt;Q;qlY@BB{lEBD1-b9y;t+m+6|+NUS)<8_XTZ)te760zG~-?6X~yjMC1T2x2_ zK5ID?!S!}lceQ9JZ|-=r4^TA9Vyb)AVKEZ-GmPKwRbLLvSrhkGA;45T2(Z}usXb${Jz{yJo>l!VYv6hm6Z+MbWjUYqbF}l-c9-kjvQ)Noz-a#p%6XT!IrrcD(OkO}P(43z zuzgH`+qY)-BaJ-FP+CyF1JWM-L7g>&DMPdhS22BX!I$rw74yf`h=6Hk(@7NRfs*tP z;z3q--Py6vJ_L4XrZQLFy~-F=fA9BiHdTGK0ZNZP!m z)ISd((*hkPxn;yK4IYL98lr^)}GnO^HF zCo>b%ii{{nq#+F|eYx){v!U9r-11!5v!QvF{h@c5ew~P!Ly9ZT5$ZkWk_d)~2-)BvItGC+j)(PypsVH?LEp2#mmEU}n}4!V zbZz?=W?bP5zyQ<1IB6zY23&gKI`D@iK2sK%yc=uqKJEd6HSaM zwL67%-7QAP$5aC}`%d8>YM;i;n2HA|Jd;wAHCEd#ta*dxh$dpzW$F~zQif?6%8?)q>a+w*GM zB=pOS`1OP4V;k^QLSy6)kq62Sc<>V)^|G)RmF4|0-`+x=>HFoVaWCW+a44yOt;=z2n(H~1^kJt=8EZQ+LpQS-rC$pv#Z)gDfqZF9DGJ4_;p%93TI~}x zcV;fwiZ)S>4j-?sOPfw^gx>ItQj1R2nVj$D>iTM!G}IyzA1fjY>@|jkMkXLrR})Wp zhpmbV!m{=2S4j;m8h`3p0dFZTZ$?;o1k{WHS0&kNLwVBJhSU^(E>sJy?f=u z&mN=VN6Al=Csec0FatJ~f4GIWL}hXD_|l}%rK?&rHW1Nn9#*D!Pvl;jX!8Q0>3Sjj zRu}pqiUA19MjR3hyGt!@zhZyP(R`DesCOsm)be(_k)hjov3!3yhu?kiXh%cuS0qVV zs;ErzCVRt7=ZkrXh?&~{)vw?%9V{zhL@A4WJOO9fLl;snaja4G@jHpY-Dx-Z=Im^h zl%&+Q6I-3x93!J4X1t~;z4j<9QqYa`AS-wQZ}<_Cp-O>W9Tmk;#W;bbJr6N%qaR7`&AIBJCv2ZXvJ;TYOoD| zX5PCNxmhc|NfgV}%A4+zQa_&_7LNG=KnFF<&o3Q)_YtJRQ*PkSRjpdSo6TK?FsW*m z{527O*?d;qruKhDLg9T^1wdHR;#+CS#g9=Hq@lkePpbvAuZA{!mAl3L$i&}2TK`As zN6YTwmh}Ho3jFN|nN;DX;B@o7q`&VZJtK6{|ClO3xS3btDl;1xfh^3cimCd#XY{`P zekB4yUS8MV9=nW=cHQqygiPVR5V_Z?!w-+l0`G&qFb0Ojfq$13F;WS#!(fLayi!z` z=hdebrxmd>Yn8WL24f*=POUBCkrJ++lkio~pIXQb3r;cxqbW-eTg0|MHB=kA{W>~* zTO}jeLo$0f8`^};QNNGvIuwFX)>bVxn_o34QpbzMo^9!)HVIT?#;O&S{b@q2v-L@L zXrp3c3Y+c)E*?itbpK$>iPCR{aGQN+?D<7t^+J@^Qwm0pe)aO6jO9EFOnS0ESvwQ1 zop_#pss{R-$^J=eY2cyY1X@c-34}7TgP-gl4Dsy&!*$R5tG77H~U+N!s!}DFRT}g4cJu`@eZeB zN`A-KKEZJO4|tV1c-aZ}kUJ5CJNL$Cq2gd_@Ed>E-VvxoC;*(J2;F_ml&X^ChX?PKR2nCvuq@H1+Fj}SSn3* z!7NV5E$;wySFHfyEMb%;t^h2*d)y z`6*$AS}$9dVW^7wrRIoO6h<=-K4ES1{=dPNt865J^p$j|q%c>iwL3a&=LC*X)N0XICz`c%HDa zoKzC9u3v(Oxn;2KH!gqD82cz`oCP~=2a$(b1i^h@zO&6t5-BMtrs3p@InkLkfyi0m zUc1z{ZQ4(j8;sH}DCF|%=PX2!uw?XjYdwW$7B77q_wA1tYo~Q@n*{BF&R^x+Ex2or z$kE>K6$XS~$2ougZZKb;@*M~nfPiNH7owchwL)p*Ei@7B2EH99&WkIAE*ygvFB>&W zn@Dis$GqG#UzntQk}@)1cJO?9$Dicj$|1_gLF7_y91Yvf2;+P%Px*(^Nz5F-Y!Mr5@Npv z=@rF{yXVK^?kM`4H{8?VDtMhG_YR{FHNuBi?8=B&$t{S+iSaX_q zIS1>7yQ(^C-pEB>SBD(l4-gs%Os2{C{iSm}SvOQfHC;AQL^^>NQ4pSi2(Hiz5*xgyHYdEpJRw%B#i@?sH^JfHjyV}X#YsXBf!JM2;3<1 z`4(YO++IYe(s7p45q6iNnbOs8W=AbvG{fiQ)01hl4wjkl`PTDvvWRFzlo)5`(0b}% zzcZw$1FoBzS!0Eq*#k#$_(&ktT)N{?@x$iGDYY;N1;zmD#vm`kO;O+w+%*7n#REL+;ZLA?g z$DD>?tJmXBOCrGN;?j$wY!$ES5zMjO!)bsP#y%jHuhFMd=m zzaBuIwj*tpc2_bB>!(iSynawsxo%~19W8gvQe#P0ZqAj1BqR}&Go8RNY)yNhB8=4Tk;3AFnRP4SI62jH93{2 z;GuqyGiiXUK_-2J1IMx8Db9jThOI2XT(|#8e>l((eJdKCHNehJDmepA82RPef3O|E zA!OsDv^G4Q*p%szsiUAKk3GoF<;LZs8>jgLL5;@Zo?92Fe7}huHvM(bL@_838jY&H=C_O42KVxa9PrDK#kqBye 
z4i)}i{{kSNTYAz>tuh!V$;6e@EZIlk;f$@v{tF%_sQ+Y$_^SLhFw;m!MJwHsTs=9jT(`EB| z!!#APodS;?=B0x}Vn>HZCq}zGc1;vl$8y%zUYxLzucH>yVOQ<%ho`a8!Z-v4Umzd` zoX@b_L{^evaOUP~qtCk`@{mjEi`QdCE76ECNX^!91n#K@lv ze$UUsvFn2+GIw#oh{l=8d0T!Bxo&T`ndK1C25SGQal7 zc|mvsOih|pD;^t>deT%+^M(Jwh125L55Y{w?X2JXDgqQ|)DiwZ+W#wW(_P$DjT(-s z7X^I+A{Ut8@pnHQk=%12tKwl0b{g19t}6EkiHh6=JG@`%)@RNMFw;aC)7=@yK04%6 z2SpIhI2VOHlEr(?SL!u+9UdVB zJZI`=yxq*^mfCB)LU=flhvYROSm-{J5ZRw@?f@w>`|{tQq`-;E^7u~148$vQcPV!& z_qu@8zBAF@4f69ziAk2Or7QEp-!9ptqy$sd`w$WfkWWcYg79`WvmT?!bu(6(b9UF* z7>I;jq{PDKd56>U15)#+^F{43`|ZyHKOqBdt`&2ydH6rKR&;dKwQ&H1jd$~t4}{T+ zQcRn&pMhk&UO$V9+U8fI^u(;zfCk>N_2sFIEL@!27nG`|7RtGasfoart*iY^;pfcU zcYG^A{l!TLhoQr3X-x~}?a;1sGLE9}d-VO3A+Oo{)T^{I>F^j)Sbxx0@tyPE*Qs0S zA-@tm&z_>l#T7uL`|^;#GN1&BK~4|TE|{7`+s5&k|X^F6J%U zuDvc)PPdl=`WIsF=0In&rH&?ecE1*G;DT$rM+O+Tqh;T(4WnG}XF29we!rWQR^wh=xHGM#L)BU~g-*VgUMkIYy2CGSt+Gu+*@%rJes z!XbVvhN54-xBvL@qh`-Y zX%e(BlzB8nj?j|T<;ns!2hGG(#z15qW~%7`bNE3fAGed68fJkeh``H4>JmhzeBR{U z$RP`%V-pgS6+y}nw0hFO`>+A21^fGvJ)q3BHA?}1C^qwlq=Ka-g3G|EcT-ox-RVue zYm!aMGA8(U02ha@7#j){QyLc$MY>= z;YVx&*T*a9WcrO?IQL?wB*qB8_^{aN?7_eCjO3kfZlPuT)g?JAN6SE(7f8A-~{huR4)8V7uHlwu_=$W99j+z!+>j zb^PscxT?{QtHXoneic7G`Lcb~*9qb!HL)X!?M}<^wy&1T+0l4vZgp3QtnU}s=zakV zhb{`S%v)@5QCwRC4BzOP+%vKYVw{PX#s)uu z9To!)$UK(@k{k57yH({*^>Dq_f z4g!?VyAGZ1H;{LbU0xqn$?Nh5g;E3VmPOw8z#%hMfRlj=JXT7k-h-FA!tp&y>`|Pn zsIfG5(i&KLIHyTEvvbHv2bA;dUr-iwArb~@RU3YKC=M!1{ z*cG4n`S<>|s1fvnvxS=+d;9BmIKK*yx z7S%jBT`YWI=S)Wp2Zj4=C-V?g{~dSu!?W(^(r{H;pZ1Kug2C8FjdINy^%WVOaXJ)H zFadH}O4`J83Oz9qm3dF8EH^lidYg;Ia%H)y6I)BN$+u;9G-D~VTxt8y$f8qF1clp0 zU6&R~cw}UTh)vFK9nidkUQl=T{bJ_AK4J>K76+%prmt{k({P`Y(TL^BM%{FsryR?l zQU92h%RbuJ4P~7S)V7+xHByehq5Xds5cvgG1bl{Xzxcv{v5lWopXR0doZI;}b2=Cb zobMV9j%@mbHZi{#$1pR)uEC^dUxMf2kJ5Qy)<-O}5%@#;3;MDSq$c*`VpX))Kx+~4 zv`@jf-sxOgF?%V99;>G56(kZk@>PyyUeakRqtD%vc}TGrY_R%__enU{f1+n~-d<0a z6L{ItKkY|M$=n=i764Gbh3HysDdsaAPPx5Rd(5usY>)xqA*E~)b>FhqhK3MTm%7SO zq1Cf<)HAFtEcO{xQiQTuh$PVppn$2!92%J>MJ5f!n4MX2-07tIOhs7G-lNYL^L@tsA9m^Guobw&kD%^qHk;5*E7;O&Unxj2l!Li2 zbDER9y#Yx2eSBt5?Y#NXV^C1vckVnyy}+1&(8HJyB3z4_avod&`EfnJ__^eDuJ5y6 zr997J(|vpGJiTu;>`;EVgQ!v&3(l9N$&yn!V*Gu998HVq(b74i)T2l#7ds9?ZKzez zl}e^RFbvtLN3$qU9IsujNInId&6?*0kzB*-JsIJIxj?^zGk~NEvkR`?W3{t@sn>lm zq96*frmgipOvLm9~o(Yh&a~-md0H#y$w<1c%An8 z=?#6ca52*X+=*^Def;sMPQccCu}#T8-0NvnN*D5IRmJ5LM3VA0*PYbQ*04QRXXHii z!W__K$cStc)cKN?K6+C5Um7@{B2bx_={!8Mln~^Q{iMQ4d(2T9Yr-SYj7$Vp@chm# zr^u>*8i#wHqXL9u+a1QD=VWDU-=s{mdp=%{a(-wRu~;BHrPI3Y7|QORF}e`v2CY6$ zI*8u@hS;x=Vfi>t*MflS+2kv9IiRWPjflx`?=F;ke{2wZ81YDa4Q5;%DC4G8xtkLT4XpG@#oo zw)-G#zEtq$_0yP!YWodCD6}v_;9~r%2}+7#o0y0tiU}B`t)U^sEtFmp}n=u zoSoQ>4MKY&c-z16=gfxer#Ftre;&*?uoZJ(RjicK?a&8k((U479Zs(7FC6}2n!?tL zqnw;t951k+tJiKKcHqd!$jnqrNGQTn?u)K*2$!bD z`l;%wp4(bn4n5z3(;k^bpaL!VtKo&a6$~GlHIRDK=P~_49lkvwd?L8}81VV`Cm6bM zihXIl3AbLy)3-1O=glj3x!)S$Zr_RceWFn&w$xskNZi=i3~X2*cBHUp!f=x8dyVaJ z!dVOgv*L%bXlD5bHW~;Qh9jr#TQlKo$*Q5uygM^DU)R%A1%Khn*KPBe_NQz-{p=6Q zB}XOGe%fhEx&HOy_yF2mpx57*8(E3+7Xu(}vBi*u0^!%SeOn{FNi`b8fNl+lJ59 z++?3r{=|G8wJe?$=H~+K4A=16X47~-A*`d+O3*Z|GRF39fbwE=Tz_kj>1XIJ zI3nPBx*p`qXmV8?5^(@$=krUh8wGp7z51A?5Is&1%#uWq77 z56?(p0jBKs8g@D>*0;8Of1EmE!<^Tbz%0U!+8@t3+aauM8*-9-2xB}&GO1UaX{6*d zilf7IG!DlmSGXi3-NsSNX$wx1P(7!Y=qJ(W=H)1uxsYMDg+VfY;}d=9*We`LP)DU$ zWeJJPMeeN2wy-1PG0!wYY3hVFa12RH!etDz>2%5CqA#bhvDIiG90Fr@Vo}*FMm0ux z00O^B57oibLx6;Z4pZOvFapL27h06G?8#PQq+RX%1|$8~lKP`PZuin_trl>VxxQ^? 
z?(6CHUWd-~1C+K_8iCEX4n3*$w zU(D=RY2qEa`My7%zdsed|6~&_SFRmk#hwi_wsY6;h-FM;v_|o*2f-B zJSUm^O;T_4TB_ErLD(K&(4p2PE$F*%ufII|%67gkcfM%@w|aPyMwydNAI_G8oN(FX z4qx!VVmyR))cwl6<=!AA?tb?Duxt-YXJi+d!u;CIKe_F110vlff~+2y74tCb-> zLmyi~5jy%2*5Vd5mxNo)w%_*bOEu@(i8L+4pXRGQTrV9LV_~Ef!|?ZpL+RSjuFJE* ziJ@B`x&A<&Df4Q!Fn+qiA%qs&nU3K~UYs?nvj*%HZe%XG5kbs5NE{#9FBy@>q-;CD zfs3>7S;5>07TKgT05L2BLwkk0@)@Clx3s#U{fz2jbczFo6?PmIU+E&yr}?PrK6qketBTuX;AGNoqf+QX^XnJUXi z_pkwjZ-E?MdS?%F(rS5DoK21PSX2sU0lY{l52s6=Skph#W@}+a7{k|Q+l?Z zUc#b#F63UXrBr1I@*?z!c{EW5*nqCb;KF6lmNSo@-WxmH{^M_&(aRPrH73pe0RE;; z!lqJvtww4Jrn=I?-_<3I6KMtHPuUD?LX^zNXU%34A3qj0Z>O$*I_Bi!zDypaArdY& zQd2Ju=nF~5gNqC$5rLXAWPSNM`<)tmBd63CduO4qfaHf1(bF$it1)pQ2s^>;$kb5i z)UwE~wsnc$Uz4gyz{R2|)A@!)F4Cp_%DUicbNZ?$em&nmzc%ig!y>2W_t(9&-0Lh~ zQ*TR#>5O%9??1`BTy5*KnScX#1QghwarN;aaBUqq4?DekBgGh#E-D3fFzl0sLNljT z_r`8nh5n4XzHVKnMhiSQk@UqclWUYocua1xR#pTZemewV%Jx(cQ;B3rI5;Q5VPL|u zX)=W^imD?UH;`^Ib(dxO6d~|+HRs$MC$ANFx0j^dxet^*M13cR!K959pJsR@I}3Ewj1eJg&#jw_}O*ya1YS^TH=n&>p;uF2mH}T8|OS zPnNeShUVaMcYt#>Q8Re@P=XSmY(M>~cJ@4CKgV0y4!C8%z#G>LAExl;WZ`ihI% zqD>xPZ}>W3v=Ik4k@R(t>66>GQ6NkY1x-}E9>3X2aw?%Zqt>toA-7KMD4r~%FyAHu z_;@Go-AC#>+hU=7y1=k-z;yl>CU^$(!g#qr%`S=*x_eE-n~qvTn`}%%s5p!WguAk` zc?$g3x4QK>jVSIp`87>< zv6$|491MSL0s_QHUb~JfHF`zW(;x9Z!n$OU_TndHmuY5^Mv%uYC;XmE<-dMO$jI3{ z2zhNyZMB5ahIqs4zfY8%%Yaj3X;=p6^5ZkV7@c`mwGW~&vY7Q0T!r=F8I$0H4CfiW zEmn_Q95K`n6_x!}yhOszG&d(?&t_^@MWCy6@T6Nwm42_&h)$)69<$Y#y@ zdMzG{)+D&A{5>7DmQ1d;Bk4MUs{*GZ;zIb z)hf5!lKJcSF&W{cAf%SN(8rIJGqN(vAPWi;*5Z41OW&$6HMtA_d zo12~E!5kZ!-|@43F0a$(Slh>I+;L3_`2hh4Ck|NR_DJ%_CvcgverK)80{G0(?Rm}W ze0H|0dnznEd((K_j|`b?97O$`CQo<9*XRY@hM7?*x7zY5SwY z+k;p6vgNVe_bYIn?I-1r|fo9P-FtrAe`sCa^ zRSUhse4~^Hr7M|#Ldg9xM+FvEvNCAoXmXF&9#=l^sm(|Z+q-_+kc=@r?`50Hd^ z5z^rIfo41=m*H&SOm*VLZHM~0ckYgmBZVG3&}N018SG9r`ew>k)TTr683hj3=%W%b z`PsNfOHr-AiEI~MQ+F}ALaK02%t;kJ^ydcS!*bJFd++8-<(M}0WXI38?>n^uOcja+K@0U=YXHMN;@O03(-{$+= z(ki8q4(i3m@}9qIZW(PyL~}Mn@Y#Tqvtqx-?JE|IUO0{na?G9?VBcrtO#6(_1}758}B&ORs$kK{W8 zWBM2qVi(yD$xOp>Z+sXE;G<59IfZ;Ke-WKq@&zdt$* zxAlKpW^?_l%`WqfIT1eix>ZtGZ8|KugGYc@1RNRiGI14ryPu975tqwp0axjvgXg&c3`x?p`Msn~w?CtQdbYnw)9kaO$4240O3vb@w$!bq8vLSf}BpZu;k!^Rz1yGX?`zU~#BzcMz{Ao*A&Rv>hkKwm}= zcW%%S^~mlgBvjo1SHF3(VG*XRvulOA@HDvbmC|O}mw(@^eNtuD-=-Kt2{RfOSTs;X z@1*aQ8L<6%C~C8*f1#bJsLre>>cQ2U)Hs=4=nkeR9IhN?1{UZkbPu7Qo6%xVqPhZp z8c~;K`uzeNfUk_IiGBoqoQM-|Qy=k)lYYU_#I1bSc0-ldo!tb`%$Zkj24w^=bw zL;^VL41ZMJC^Y>%{PKq}g$BD;^$X;)5)w5OBUv(*Tu6-d<68R^>oquv>|Q&uL(?2= zu^*aZzHWHF@c8NrWz*`)iZ;@nah?(in_a_g;^P}W9=r%Chl{g7kKS0Rru%v}r58Z@ z4vzPyDvxw{qE^}99#qDEOkDz{$d%p3q&P*exz&KQ*~cFZC>J;TPvEpXt7x!f@89r^ zzk3DS6td|-zM?W%)xr0aL-)ti;1LiJi!~uTk0q(Mjl~++pGCesc-JNr)6%nx8Di31-IVE=%6Cic`BIbg#MTD{MgpPi8Id~2s&jDJoG zUkCB(Rm-%lKis^b*FT=3g?EF?hn?^5bgF<$Ym5HoVP7Z_7be%9HPMBu(>ELZQGXml zB7C#v*w=bMj*dB$kgPL;4pA8zxDe(f(jFDKKTKOyuXTUQtKa{CxiOqqIlsQPC*DB$ z=jB5p5#w)C-GF?fym8h(v9uSs?yCClm>YvH4p1S>hy;Uvep@Az8jNk{TuilOxr`Dy+fQlSTK2$K+n~^^iop%tzl)zm@#zmF%>Ux-4Y$bFn-wG@-^MM;h$lx zc8(QEB89}ShDHMlKEmmWNS9W{{hS+pw6G#1BubitfW;P7&sm+SE1modql-2;$2@~rj;bz)$AJ4D&GX#d$|BZbf}tf5 zH8sPo{a@sFm&-z*IjjtU{UT{OAxkGeL95NL$2As4HNU7y5FNNL`O%ID&Ysz2JLgk} z!&##TJ}k$RT!{5i_Hl|GH(k%RQPn|xXMB(~h$rv32$xDY< zF(6zp?z26Qa!r%lT2xfnLH*$5Ux1&mR$kUUl+KQD6|ncrIeyS253W~7Y1Yt7Ov?xZ z{tlAUuNc{OsL})$nyq1O!Aw>AUZcIIK171K%^-EOk0?rBQ&FTRyfl}4DHLJJGeZ^B zx|bMtcgy|hO}zfyCM^c22eyMcfflki`h%8CKNYE^({T7X&OU@t)P?;Qhs8*e8}wm4 ztueoCZ%6-f0>jQbo{Ih9!K39VboI;SQ?*@#k6=qK>)BC}{ifDX|dJ>ZDw z!Bp73SKRN{6kdv1PHH1-*xBT(k*m0ZBE&1|Y^n4X`+13{>k&ek<9)1_K4ZgH zbc#T%zG=MCc(D7oOHcj56YZR;vRZNP(CCln?1q`mTzoF3%!gl`W3jM>&~T9?Wc6v9 
z@J~oF$}KT?yYd5H0?`zGS>wI0wmRKU6yCy?r8G;D`hf{7A>SpeT$I`PbZ>u$Ih*dw zc}kY`ZMC^@$d^s-BJ7%(VBV4^(FXR(d`wCaK{I0=7#!?;K5A`WTLT~Ss(K&qflKgB zER~izUr*n!AGx#GU>$SMn+ChJRfBUr6VS+FQV!tG? zb(duP%rVLI(A9M4a#rB9{eHJSWLV}EQ!}Ep(o&uFLlAU_0al!ZvEUVMvq#_-z>kb900nPscXo9fH0!og#ZZeSKLnv3k#`$Mx&Wq|81q zwDWpQEAciGq_bI*kMawC=yX|C2`iYe=`)-Wk)oHz8QQ?>QmTthTDE$DmMwNZxo)-{ zUEtc0oK1#FsspM$jvSxA>?FM(taw)0DX9oAumV$+I)k^1qDtsOY(ecxU1x7Y+G(0YX5-$k@zh{|0U2+grG=P_js;ME#z3EPS7D<$b8A-wtvpP zjc+7TR(p<0#C3Gt?NC-$>>91}!E@Kk(NVaD5ND-5cHs{v0i`NlG*CsE?<`(2R8V=M zKiIRcukj4k*3?vJLyFv8cBbY>JKqO}`0L|)-vVkOOS){oFi+&wbafuFSyGY(Q5Vq! zG0?&XZoBrY#3I7PGMb@b7MX~cLPdJMrY8>Wz?l~zYQ%2qr7d-%^x45}mX6Bl`@PP? zxm`G+Qv&G%tw5RgSr*?HSD|M`E2A*_>^F;Gb{zS*3Uf=#2+sUrHBjzRMbi3Hx4iGg zve1i~@|tCH2wtW@Ioczd?^CbP^HP>T>wVWtRyg6jo!fOkx6s2&FX_w9vt>~sMr}j; z0IO6iRjI~RLS6KWK`YWQ%1RP}l9X~&F{)mtlP;E6G3t>$fs`cFlO^ut`TMM|G>OUk znOS_Q7}C6gve5hLWXF5d`hpi0fsl3_pM|BnYXD-9vP9gE;BBmSm=o298zNZJ>} z+*}|N8?Lb!-Q>b@y-yx2&b&T-P$*Yw^Q;z)qG&_PQ8qr;dk|!|Ru#hZjrc|ecPH>6 zD(dsyh=|L@K~W)4wD@_gkSQhgD_u!WVos`<0)-*+%@Rszj#r#rjURvC;U-!Hw0d!3 zGXz*nKI!#KkPEC@+Bb)FuGXt}Qbt7kn!kT-FOW=H zF~%98V`5rfYtbb&{s~FNT42f^xG}e|PRzudZY5CB=V&u$zBtWT=Je^Ht~%6c z`WpCH78pFRxzN;HTT#h%ePE&zftt;j$9?4ykm#ZrNS#^}X)ONs8j-mobNSIpR3dvR)IC4kG z(pjQOa-+6IYjv|A;(Zt=KK)anG*e2vWiBi1YKEPvvb1P;fpfPlQ=~iFvEb3EAnJbW+JMN>IG z&ANO=>Ev=B%1C?S!6(k(OTRzXX({N(-V1ujlM~9$2_BodT#x$u0a^Vr#bn9uN6Uq|HIj@~&G zahXcN%&mV>#5u~@{c)5{5>;%NI{Q^$@w&*=&HR&mosuGG_)yZs&cS6q3$^`uDzBI+ zd@IeC=*OiZ$XT?ZHv*l>=RVX*CCK;rdA**qsT9@naRGejqz=yFT7O-B*X-@1D)rOSVcH^L_eWUOE8IgHOtpSwI@S$q=%7D|)9WHO*Ont8}8Eye+SzfqeXg z9`(o>{95IMX=Qo&zU-_1tu@(;x27bX0++jpLQ&aCkeZ}~vL@U+(SE!wJP-S(63F!4 z!!S|Ic7(69U?Av38$Dh&Wj(_{i9xULh!(wL?2k5XNe~jW&`@YvpnIg4K3q!rI#sZp zx9)ZI1@WdBFE$uPcez6hFFmze>(t8GNiTLd11Dgb;&CPq>348CpRFF>IkMsF72#J| zmxS?vV$_b8LS;Cv5>4;CmUjaH@kDKke>t&q7SXM__R(gh0^A%>Ko$+gYlY1fujsy5 zA*@m5IJ1A5W}&!1!;v=#21XNvr*x?{PFqzyu_$de_gj|9V{cFZ=U8$;Oqu6fSk8OqFlw$WI)yeqS_ z(^_xI4;F3Fm)TX|_~`x8aXou|Duj5xf|ZPCHKKH<3OA=E9f((&@xvrR--Xp`Ay<>0 zJVa6wP_YA&Rg!fSqZt4R+Fb`vRJC130s+8wVr40IJXa-c&~;Hc+WPFd-1vRxxK~ZRVL_?-u;G~yyZ7#m zpwSLf`i_hW8OJ&&`!1qiyU({=?Up^WP<$Sb$z4-Z3*ZZEv~N{rK8%b^Hb~Du zBv%!TR}sd7y6Yc3K9hUHgZO0?@lh<&;k;vTvqEwpo>$lVJkJ*4P~p}e_eNGeSX35)imxF0r-R|5z?BoLtH;Jo*Bp{znK@WP=1$OV^OL?vTxZiD z&|&WP*aN3VJVCz$U2;2CS4=H4tH9y&PT)gO-8>NWec_!m|F|Z>=67zlugW>_tQ2+W zvUqjrOnyuy(*+Op7oZUuL?RoPWRXufe)3k)32$~P_!4lHkVtTZ>p+DOsW{fspu6t< z(+HRBl6?{;SMhl$hBXd+WB-yd&<5L~TwW>y4KdX##J=2{8W$F;c`$C(wC#U(hELHVs)kQBX* zVq*f2L5oxQp*y{03l%#BNj=&AemUKNt!IA{=ERJ!bfMK>poC#-Y0#sV3DUP_(Z*}p zx2@K}nw-|{UQVh?4Vo%Vhr@U((S1o#EF>qxvPRcv`i_7&%nic(7RTxb z&Qbbs?oP}lSF+^SOAk`fygiF;yu8ui)b|TTwetJQuJ$j8eSJ8;6j^Uh zt3vMh<4(UD;?6cy-O0-@6Er(#PgYjkFMu3H9N93ft`q><$~t^S-F4eqZrcnwy;T(& zCeF`Ca&k-R%vym^a$LsM;ruUMiuoZ3eTE%KW?(*6+zMG5oZ27$<2#xYeVvv2 z@m+Rms3pGQ_!0YO%aE}2f#s)w9e)4`d@;+}EkohiK$g(y7}n*Hc6T64@NW`shD$_j zV8LotS{#A6NT)c>fOW)r;dZEo`*{JE!T_lqC{X0Lu) zwA5ZvqtAzpZJD=bIhAr8Ll`l`T_JvyPGSJvI1u?(9on{0UPo^g^tu;KImt1@!<07$ zN>U*pJXmhF>tjEa+_M6+l=?-u{Xsa=AHh{v9?u!RrxIQPo3;n7iv(_w@4=zN&8=pr z0LB51B6{7AbLZ2IsuN7zDHdk2x~t?WBS!8Y>~*l;W6M=t7gsuGh$h1h5K)NkSQ1gD(w z-Sq^47X5lGCmp`8c~@aYdBJ}>PmZUsYC)~N%nnom-|?8}mb$n?0SSiN1IG1`_})Wv zMe4(@-B0>5wTIct>M&RaYwOD>)MbJ%VnSbUgESVhnSYY7QKi8%iUPRdxg16=C&!Jl zQaCsoO)*uBY+Tg!r3kFk_96QWuU?>q>fN50RjPDdk{8zDQFN!h7u1${Z*^yeELYNf zi}4wpbO2d99VE{y7S{(fg?lHUa>-Ny*U?D8#KdYPU$NZGGR7 z#54HdUfvi1`Aj?Zj5x?69G*XRzL%lEV=YUJLd#-Ywx@yN;j!2f94s>2I>Q?V9|M;# zb1TBq_vaz3&h%&8_wWeC}iXo;@l?SSS*g z^Xei;VM%E=OgNF#fF6j>^E=t+Jr@Ro%g5JXwWk)2&Ymm^)GQJR@BAwL&vT#l#vGJI 
zV%zTurpoMs94-AQgN#7zD*Cg6%dXGCv+-QGw}<<*6!?YvSO9tkrtA;JvyU6G$|x-9 zY&gE|x6(ODdT4Bb8L6xk9e%-@WF3A6&Cxi5o^ma$sfMhj{uQ7p9c>;-|8(&zKD0$`UV|rE9h#aZy#qFtL>SNiRbYb1f3h7L6g2_#n5@5;>1r=qfC0IDD%77;&*0+` zJe#djdZk1ODh||pq{Ouqq@g|uGs}=D4n8D4YMQsZ5qfvLXlN1vPK}sguR3ZhZ>qx$ ztl7TmGzP;m$#xRMKVipghOs2c6S;e~`SBgm*ZMv+b67V9hk$QW|5$x^QPv$hGkD$v zd0v`pQTxbs{MsEoy}TQ4P`peJq$o~$ddefH?4sL@w!oY+iSG$3lJArE{eC2#(P5>x zV9?G2%f*L@291kUZp}D=pOUPXUNlxQog5&gGr4JyT~2u)95bXdOqy2P(Z)5}0NJ!Z z$)kLFv}Law(%CYeVq0C=t)LUk(m(igY1#!kc5ygKNy;&&z}!qph5HZlTGs$Cu}% zl|DnqgV_R}O0C%pp=}-XAFB=69dqWEC=qh{9^+q%S^A|??f0h+>HEsVqn=>l(nDEY zV0iwVhd*+&*;?sUk1xMGWiHoKWusLTZ3hDh+8n<(qW)M{R#AZpd*W&|2XhfPoOlP@ zrDvo|kn)}m`cyk|gvS?chzv^}Z8)x%k;KzbzUH~O7zOY3j4uV6S>>a6GILe8gpf8? zFQHO{lz}Z5Efi=`e}Onyf{>^lB!4j>d&}J-)hhtS6QPK-yypRzxSb1SVh zhhWG&yUB&{rJ`)Vlw9Pa;F(C5CsCIRimLE4=?V( z;r+sId3VcWs0Hn8Cc{0Fi=kbEFh2M(#UYW4;9x4-E_>1fHP3Wxt|2&=mB}X9w&$3S zH7k`cl0b~i;=qb##8kY!{I0Fec zzZoAPHz*^8Afwn-mFWnSWoRYV4Q`@G`}HtSCs&Y^Pjn_%*bT>g*Cxk*l_Mmu@1S#L zFx=kjYFzan*M6-iYvWd|e>*9TXe`a?fS9WVWF%nDu{v-I&s9sgDCoL^2UqlQsThUw zKS4TBCJ5uh$~=H=#KoLbh*8M0%(ca}+>!P6KA7KV1e0n*P8t)sfyCR}+u-0Vlh;mG;_QPFm(wiG|F9fb_Tymq0|ODjXLV6o@E%44p zY$6l*zE4UD&q={z`&NqFn{&C86d{vpx%3&ZBpuFcN5>A-ndj#XH}``j!EsX(aI+w7vFWi9H#)2WxoYz^EKcu({ z)JqhVwq_GDujQ>q(yRDZ&8EO;3Bi$oRuB^L1cKKwtao_$+o{vh#w%j+2+!$C?xsJ0 z00tbc_)}Ck=k%7q$Mh9*Zc4q7?~ipo^-(r+$kkO&))KINu}njzX>PsjS9r&>^Yv`Y zhaX0LT2o#&{OCiqeukHF7wtn9_*;#?7|ZOJLOydo5~RHX;X&=c+YU$bNE0msiu5y| z5K1kJeU(dtEoqMHQ(TyXRjF_Y_WS{fFd&?+Q{3zUO*xtBA%tK{XCW97!rPUdg`Xzw z$|!Z#+SMTxxXRTds~+P2Wf$w9MK<7fEbz>PZhv!0#@~$6v$A2 zV6t)?5WAjcFhfIz@?%&idYI(4Sww!O8o(wRYr#iu8C@6RD4@D+#@tqMc8K31`Az}e z`;rcy4-XT3d;*e*iaro&(MZvR=(PyuCk&H>HUx@!qSG$Q@qJ^p$D85e{hTS-d|-v1u! zuc4!V16UI>{=h-VBm8L;;AH=?EvLRxuhziX$i?kO|7?s4F^N=+dWROd0$iw|<5g?o z;^GFmcSvA=0Qb;HoLd!gYbu*0y{NLWvnZ~(Q@I;vgnT4Jk4n4ucxruu{oRvuTc%Z3 zMt_x~5)T0Zok$LLbUwPB9AWq$O6+VkXp<82;&6{=Xn$5oOj3))E6Net4E|kmYblh4=@LTsaKfg0`wPT_)Mb(H( z=dVp+ffUIz5|`I+cDprITO>>$Qb`vBy6Ecf_Kv=!;KQpV{0O+(cpLVU3{j>gNAn#AtO*84{LRg5Zj*q zb5hIgD9?;BgaqMzf2dxpjs40Tp^S39Rj&m9pO=UI7m~m~8D;%6#%9|75d0U4z&|x2 zKhz}e9@Mb>Ur1DQg3%&fn>l7b{7)!20Q*2X8$#C*9;ibjN@Sy82=ZvyYT2? znvo>WpZ*)|A0Vaw;^mwyjbi_e^#76C|Em*QLuWdP_hUi|Vq$}m~9HGO>&<3x@!PPhIPEd*H_SQ={Up04HgZpG#+B_W9p zFbMriT<`;XB-rqDyya9t`<1SZA5R3v?JunX!U1wV>;u_-9<7 z(RAWlZcCcFV=zDv@qpV*k!L575jRXUx5kT+%0!sV73R})aV6C1OOQy8g4^fMsja-! zX7D>1em=g`c5p|obmY@*UwX0O{i%?^OU3^2i<=xk-wmvCwvD%*&R&KiPNHPJ@NQb! z`OU@U`G1&tKr+ULmP@n!#*t?0D z=0{gxEG(^Ed0j0b3>4`#ikszdS783_=SKrQKvQ(nzuzdHRuO)`{M+NxyD_FeKRP}M zF)0b5>oYCbjHs!`m(2Yji4Xfk=GW!;7cj-6yxYmMzlxO6j~rW;#_L9;&I|E4ww4VN zYJyd8C&=%}Kvq@M#Ux0|NK?)A-9TO5S1&$Xm}~>W-&5dKw;jroIInG9O%$gW4JlMp@BY{*@KZDd-MGpw^fj^Gduw{+-y~T#x#Gyf`H&3U`t3U0pwbhk%wNJl^ zx`W?14-JmRKAn2z8DX7afHx!)_xZ>Pf3Pt#gZb6Jb5cTXbckJCPHSanY2PYQoN?E% zteaH1snhq}yehaAg#T7AlaH)J)#a4OxI8RWGzX7PS?Rteel83@=*d{@vlQezP|~*4C2r! zcb%h!1TZ+7qiQ8X`upi~z#n-k8mWFWvZS@NutbpE%RQ{*s%3%th`U!5pk$^DXo65( zHLP)apZc|AU4|JcYhJi2A*yL%;!fwnnzkQcwA(S__C9++b_CLizP2A>WQR(>TIrq8_vcu88SuK!3Gzz zno+w`f(zL0^UL(TiM9q`@FxsqcJLz3u1V@V(9ZtbdJK?&h=f>LhRr-E@ayBk3-`m$X&*~R?&$<#1LdR$42Z zChz3NsW1Dm|A6g(Ep>Hm4Ze}_r?R@?W@Y*#ULxf91Z)%86e#iy#DlsbNf;reg!gen3n>f&{h~S@N)P3GlElEenC>mVKLAeZ35oB0%x&wKH3% z8N?xXwhOL0%sYwvVAWLuWKb1SD9+a2;-tLVNgA>ZV|5GXz#r_F1Q$PS-SpQbRYu6m zwDha3GN6VWThd!O6KmuvATRg_UAbqcFXB~OOJASR)ARB0A2qK0xJH-al%Flg`;>zi zO8`reYO=k9A1uG|lveaZ5`}Ay*lJlj^~jky(c{FN)2FKHeQCVa@+N;TMQ8CJZ#XL=|8q{CF}GxLVq9J9VK! 
zd9-rsL<$^kavB|2UH&}3pawo%D7(;yb`%9GU}ZYOl_C!$kVA-*&*$yx_FCO`<@nID^;kSLf#0&AK|3&3~U*ezB!)&^8c2K{)WtKtS*a<^^%n9&!kv2|&W{6!>i|9o=;}oR-QSgK6&^M~s(VrU_yK>0g#@n_mf09`RtW;z?@d z@a*)A_<)#*1fO79R@Ws};n<_8sGUYyv=GdA{7sOG#lv7HXKuX=A5bp?0o&nJQ-;q9maL(9BEjba;Z&bpxi z-dS@BM1Xjn;<#ObkROs3yyZdpL?l-g(Na-N!v6l=-D>8&;1{3M`PIj~IP*XN0O}eq zDG?X%u#^(p*#76E`A4n!E-9Z*p$#;6oeP4;REH{#Z?d)U{8E6LWCPaXJytLoXy8gn z?cfViY$8eEImQ5xIe}%wZYDkQZ#HuEk=j@i83Eey!pXcrvLRTQCu{FCzgnOl1#2Hjm6DKEVP9@%AFS^aXxj1}b~tzlbJa6A>9XD!s%0bS$XMTZCd*AD<@ja;c)l z_0=aLyxQ}4fH;lYJg}!BZ|C3Nxj_f!PnmqWVCUv9itFEoI|a~cI=l7qx~VS|llmO3 z_?`si*+{(9{@M$!s>*0DUT3cKAIM*6`+kb_>{==+qgSTD^KW)M#Oym45FSL$qL`CJqnu|VZcNtNp}8|-JJ2CUgW zk|9~EqGZ%e8OWl)wZ&# zgq{<9UnDp>JezJc!n8FV8u)+Nepsra+u<0i#=hlSo+$<|I%}>sUL;B}i zx?;fuiZLae`0viNwEn-ZX5ft2-b#O81z=`Z|I-fO0dAMS5C{6Z9eB-WVhua(Ds{tc z9;BpA&D2Jka+OH_@t4RLzJGxqiG))(afR`g`N-wwYw@2{V`>x$ySDgJhZ0I2X6 z+VpU8kedGi`+sH;fI)_<%vzKh@PC{pa4+x#{o??*|1|zbOn__yu!Y1PD=EzW=ga?% z2`~wR0)Ld_24ytZ#{bu7wD{mPhjA*Uqy0b5;Eo?y@I*Wa#$Ye@zq%ocf!Dk{-4gNl zf5!O7z2hYVFjD`2Y+9^h)P;4L8q19?Jb&j#K(?@;ho#NN6k9BcZN=;&&oWbwZV;jV zy#{FT$dTdU;j*;Q+i%*@+l#kg8o6P=K6Me`>K^)^wYYEtFKX2NrtaVChNx(8T<8L} z;qr{`qMCgC*9-r< z#DV8?@b3Q)82pPc$#Ab#WUPPwYh*wU0?|sV_j)B$@VY{G#(OK8=a0p6Zq(D|4qx&8 zSMG=Zi8pl3f;T&NExJaLf0?Q5Y-rB#Rl-TqKh=fajUY?3Jm32~_B8Ht@7t-j0LYyG z(M2E_d1Nz#RhhDv`A5DOqEBPgq8nCEw}&LuH?ob z{_c2D2~ZX}U1lMrXQGqC&1oyYVW<))S}c#V`k$=_T_YBZe=3I>w!bw4hNjx2H#G_E z41s#!5+#I7@0^3U?A%p{o(CB*{2$ThfAyR1w*XKg{4Jy_?Tx9o3@qhY?lWhb)#kUZ z%8Ex_4XKcSnj33%fe))DZrJvU?3O1~l08u2piHPluh$L#S&V;V4PO4C0KmUbZ>%~X z1#0d49z(LDX1q`m|9?bX#t^5(=>j`!DYB7C{PO>+ffDABkMghh+F`jzur;Qp*ys8f zB*|>N!%F#I>Rjy%0b>}8X9aDNcs58fiUSVU=ybz8(;U~evc9`_C$@e2*Yf@60S^BX zx=$yhwr;?hzz-TfGTQTigB35xV^MA9?r#(sA>>T|0nfki>>m#uOODS(ndEZ1$6ORo z`GvgN7z|x$YR?zc(%z-6T1KXHA*^OLO@bLxPsJf7?3ERQ~ z9`%E2I!Qu~fG0=csAsIi%Vn(dG?l&_{8+Z43#07EZ6waWDb78_Drxbv^>%iG8%bkt zND15TIovT?zECMYRwaSC1PC8w{YEO|Qzqzo(+%@2PB$LZHO%<$SAP@s{TLrfk;AU{ z=acs)Qg><$vf2k=X=H(--qRMm(pq)rC;FjFqhA{o<)=CC>u&H z$EKFT<*hC7uK~cG8E|AowtYuvK^cYoC))GRK%sr8eRs*~N<1Ihn|aGm63`3|2m+q} z`8EF9OCg6NNMCf#)a`x1)gN*6*Tt6kKUXNsf#H1Jzf%I~$=}PT42aO7fdj~K|GI)!1q}Cv_bwXm zPtyQYsEiuWHP8Ys;(xBdsRP5kNqHw8_7jYLjXDh&dX%Qs2fzPZ@udZN+Q0ssk&W!q z?7L@AeBJqO=wy3cY|fJ1UzB*r6D*x4ahN1?yqP(Ra_p|8Ramf|I-eFjGWahUSG zz}k928ol>SQgZ6Xc=G)Ein%s{yeM9kP7E%#2QNcgj<4ZAA+uvknRZ=}` zYI+2awG4sdJmWA2*d=v<))U7AB&yX?m!v!CMjT!bAxS-&)&tkAQysl>ZQ>FkLpM_z z+vDKrGe)(=x$P#`S+$0V;+l@O_k|P8+q>ll{5tb~{BHxJoF+AQR?b(Bgi`g&pES@- zjjrL=<#u7FI2xZz>~dkLfOushr%H<_J?_{T*zhtZIJwRsd#DrHhjp3?*@wG^TK!8+ zOj)rL5Kp-|T}KpHpEP0DS(6Qu0>>|?)p9yB7pX|t!bc%-BLvw0u!toTU{&!rWc5++ z)jd@Piq+z=u|jky3aD&GW=gnFd7|jNQ?KgF3v7<8%K73L#L({}ax3dBIj)0dR=!%U z)GA7E+R`0lOVebeNOTKI19jP8kYFkgmOM4(jf_7p#ywla_rP2%o4<>(^23h5*+1iy z5eqy@Na?J&&9)dw6ZPBM#wO4Iil>&aHRQ6jdl3q>obXxBESE4aWE(9Ozr!fV9}cfu z2G}iC7S|9`tZFl%O_xm>!Cd&15k7QK<8)hB2B)P#d#UJCvjoM$LW!#(Ykx+ z!93h$8P!g|jI@>xuIS2$WJ8>dKk+ZP;>^;`evo-+r#{eDUJU{Tj3k(CG1QnTyV7+S>=xShx=t?<^D~ zm6=O|)q1#7*em2*H^Kz>3T}ha=IWMrFTU=y#62SyI-Zz(3HQkT$mFK)aZbdCuPlSO z2+16l?nl#tFKGsGZ9Ib94lQ@rSn)&t(OW5*~Q*jfPK3hjM!;m!oiLB|S1N;cy59QITVN2&>R^1DO?6Vm&Iw7J&m!@Ts=Mh zw-}-98G-3B*=jAhi{T^q*s+L7b9L*uY8HwjqPqhY!ux6C?IyP!p1Oif*TQbbLrQYD z?p4_96e8dH2+1{UF1=aUmzX$qZEHH1$3{-?Pgo)dpS@4yu5ZG*9wz@3yxJ;WiRimE z2ZO4d$#km`(uhoBP)>Brb>rDy$xP&CT54cAmZ=pZ4+{<~hpi1OS-Q1^ePLQQwdu`q zhjL;Sxl42m$KZZ=^FZXFBenA;yi{&h#T-XPdB$==Vg)*A_%*5h{+PL&P@&byMEM}h zJS25~qR<$1&*h*GQsw?|8oCz8$yf6V;R9-*Hv1A*zGhj;*20&iE&HebI%ReRFzR3PLLvoU_;NoxDtypDlp?IzK}}pRKw=we>R> 
z>u!sgD5zrBobZiq=$D-LR<@T#-%LwhpMcNE(cFnK7owH{MJXTC9D{wl$UF*>DdIV1NuFXJVlpW-SV1Q)NR69@+TXHE987Z?i-8mSy1FQSk;+K6F%Oy zfClj5$zA(rnzL!YcVGxw*p6AR4@jEn55v;Mqll?DUGHq(RFAcZd}AAD!Ahta7|32- zZ{hjsy&-f*72iiA^H0I~9yb2+_sm}S-zRh9b7KPq zmMjy?Q)-&@Wv60IhwM)itnt2e8_D+99U5QXUEgu~JA`Givd8bJp%SvNUc+2f1Rt!* z=J*DCQ}W$j8=ECG<&B$QW-wp7(n_lG+@-21!DF`v7HK%?G{BuYE(YfZj~<4XU!LFD zDlDhz4`XD~^BTT8xC<0H2+J&A($xADGjmYugh)sjG1=}Ld;Q2Yr@8G!!m^&K2Kt-C zzdaoaYri|cV;Nh#4(A|dj7NMb`j0L)qYq8h#UC2GjZO4TNPm;*64Z=B3vj-fY7UNq zgLIEk9PnBnbX&DlQY~~~KzgEB?(}Zr!BdsI1aC=gFz7r14tL+=1rPMcj^?xwcSiGk z$Yo8ZKt$w84h&F-C>a3{kqLQfZAwOP9TbnB0HT~+0)<+?z8?&}v2hf45Sh97i}HU% z`*5^F@=t%|T@RqrRRKiA67W&LCf_fI`QsBkKt(eg@Fsl#(BDISg36m&84sRnTw!nHv2w#C?f;okr!SIsbM?Mq0pmkABJgZ|n`*2gGS{ z+b5##5Q^#BlJg2!m9#qb0)O6x*{lYN*?%@ zGgUUJg^;AyQ%syc*Oi)teip+oxCZ^0Zm#udu-I&^V|reoznsEOl}=Y?sQQex80;S_ z_o~2i`5gVzRCjPqMYg0aEVlrVL8E%gIn5UK?V0JOg(rR4-~CxV{~`Er6EB&r>MFtW zVI%xFUfeBWdTj#sJE-1X%o^>8&<$-?-Ql=i%rB!+Ukbg>9$6y5sg0{D{YT%7RnoTPXzQczbb7qXa|3VK~5HLRUqCm}ka z`TZynRWja=8DmBbNXN;SqoS*nV)CfX`t%oCx^*~DL{yCMXxR`Z`IG-ls(ygD7YvkL z=z7sBjiP?7GRoa_f{^4>YqnRGXG;t9FA0bNZ64{{R?t1m=m z0YgUkn`HnN=G6&TGiheo*X};iK6CSFW_EtPwEK_@d<&?GN8xqqQ{^}0kH9t7pwF5h z=*_B4K&!ptC9gL6Qwsb98Q|Rn!8mTirE^f{$F#MH!`$KQ;18uvmHo2q67Uz6?hd;2 zf&Nd&-$jWHYD*RUUFaqd1g{f$oxS^lJf(7pHxj4BlON;0{`gFFd`eOlzFQ?^0@+)P ziMjh%K=F?Py~ebro5tXy;~_ocC$I-HlDt_S!@*0GXO2ujRd)FymG#2KteKs^yX>}g z_i43|JRRI$n3uPdZ^ z3tst@&N3dZGddcrnw3R=ojvnc2@eQ`cLDaKtPsNy_EZCu^Cx|b2 z5YK-3Z{RR20t_ajmZv`;lLn)Nh6~RToME3BBWzK7wdsd(0Pvd!)LO)ZL;v5QC<4H% zFa~fG|I`uSpRWLdn}n(5uJ3=U6acXUfLC;uvtMO|e{oyxel6p1T>li=KXdy()B1NM z0V?YMM}*S<@1>P$4N22m=I&5d?>AVoSNo~^l;&R-&YcC^Am_a<(}Om^DG*=;55l(_ z4}a)bfie{+{pnP^?Cw?6pXf&kGQ0^q!niMf5?LfvH$6h-JI1HTU-JJ!u72_mR(R$> zk>zA3W;2}QAJuR$u-MN>gY7hdy84l#N^f%e%0<>WY6ioN1Z>yJg-bo6V-@U z<}QYY*800z(%Q-gp5W8~xp7#fZ$7&r>*$aP#L1D#&=pSKkNy{`zH4YF$1lt+6q zuB6NgynBmzyn35YF@}`1T{ta#@t_aX_G`!LeiP`rCRt? z^(1FocxESyOetT)BevC8nRIDMn%zzZ+L{WjNI4ds&S()|D@E_*QRIp?r`)EnNK?Hy z0Qj3Oklr5e*l}L~XH+OFQGgd31SSAFWRmIfh z1?2@T0A``d+tQ-xdRaUhAQF%!PgSC;a~qKuPt)PIuTOsovEwg~wIMt_yY))wjYjA$ z&LlqCsc8i*{@!--K1~TacrvO?4GTlpSY}7e&i2;ioM|_aaT^69%jsO>^X^WEC*5$^cn{_45?GrqTHVGm15SW!*aJM*9^td-i%)s zM0`bRYZrDBa4qDaB@_eF_NxuwzDQAkfLY`TeW^62$Yuo?JU~7yTBy%>vkp)YV~TW{ zKNT-3t<-#avaed5&*D3^Z^P929k6{*%zLw6ril=RWTde!&e0wnUD)3qE;N5GJ~~>Y zJzr&Xovlb48q~^dSP_7*5xn}ttO+oo?5xOAouj`j_Z*k4`>9E0RX8Z_9F>2vT-Z{& zYcr(6H?jFZ;p|hHn1R{L-4i@tz zj;==Tl9fOYrrkdK&op$k!t;v5be-kYps1jT+hKjA7B~VcvYLod*>ZuBj*gNFZgNs# z%?_*!7jQ5oPXQ4ty-d<03TFb}UUCOO7+%>bZ>5@1QAw@d_I0Q2wS$r}BgD&rL=wh_ z42FLI-dL!%>SIS_q;BX-`|U4^rwZ|bLW3Ih3o`2Rs!I9E1034mi~)e3*H`pBswnH} zZ$a*2@k(wK=-1(i!LpIojN+Uu9-f1N)V0}Tj`-0|Vx!HU6h98RzO=B1Gg)YpFk#AW z6-u0eQB#}ZZ1c}xs(>^3*usGe&Owq&cwXlM<8lQv&t1vF#tU*adyW`ryNMQo{s6_? zWJ5B_*=7X!rA@_xsxclxcXT1-e{8Dhn^$TecrA~7S`$559#ud?pm#wANqa}V0 z(Fw`Sq^BsXN`WQQNIblGxOrhm^0kyP{GP`_5CzPIG4J&Z>}m-=GsdJ_lqEEJ@UT@j z?f{MI1C8Pl7~SKG+wWzOUk`6*xv^QuFhZU499o>}t!H zpbz&Dc|dYgrgj%bHN8mG#zmOl=^lLra4HGp?H+i9Z_v zHGT1qcE4hfUla3c2bKvVp#=J`IRXFtS7<-S`L$FfDgbinqW&n*Jw%Q_F2(_bo+wJy zk$*XXzt-zv%l-@Yubcn6=l=u3Le{7c`g19oBOxVvIUTOPX6;m4%uamJLyaQVTd{;= zJ8-z$b;QR&_dPAE%Xd3C8#r60H)2Kd@tH^RPndJ$p`RDt@LPawJtlB%-HJI zPVELKgPPNGG9ciLcqp91r@*u*n1Mbq^qFTl^}nrMrgpOxmO5*Z`s6~ouV@zD6Ms&w zi3-+nK2r!FR(J#s1>~IB{8G&Azj0y+%7Oj^`@!9XnVGp%DRC*jEEs%K_Hz4`nR#_K zrRW&mizmgvqj`CwriRJ2B-#ASBf1A3NN=xZKNgFOj1~D&CLdxwxXv0{{W;$cEEd;} z=0L-ptPB~dD7hpckY+*Pq?&&>cORa5wSr%ky5#(qTmYX@_! 
z@fj>wYp}>sqwqdLPksl;3;EdRMjw7FgYgT%Dnx(UU#t~|IPvqA!f~tg%A=s3Zm<~5 zwlDaSCL@3>^9r~z&|uM~n@rMn{o6tjl%RQC&1?fC+CmdEB?2;Bxc!+rla2KcBavUT zGFz&Euk!Q4(`!larfBy3Mls+&gJO=6bG2=Aa|zW*t}N=rLh&Uk+2W5lzw`69U8%}<3=?yya134lPfq5 z=m6*37uh}avp!ZvHZ;!1&G4L(`thVvj6e8|Wj8iA%s<%+g$CICO>B^me?C?JvhTMHR^?#=I&vJVG|BAGbCEnow zE3A6b!)m_oYe2qv!(GkFc5^gaJfIz=v5$}ap|k&E$JJ{5$&0C~+lF^Hiej=Cvj_Fb z3(;af=~&!AOLPZnPN2jOO>ZuP1cGO(o?Zv@{a4u|3p3Fg$xW&J-O}%F{nM>4K#Nz$ zH>V~V6NGiHSJjqkDILsWzrrXIU?@TI;Lk)QG+?(UO)lT|$vX$zbl;Mq1$Ov}bj+c_ z_=$LYYuUhm&Xe`2U08UQYw}>B?yV|}o)Mtr1fL)ci6UxDEaxlsYmFR8zZsBx!k|TX z5@yIO!^rZy)ErbvDZgilfgXVVQ`yx46S3KG3&(+EB-g?6`${+8-~%*A!+%IW<#G4f z?>QccJp1VP!>1lr@GB2`9v4sfFUAHpQ@p%&?g0G`O=bnG9(SX98+4j$Qfkrr6}A4xSzX+WcD9^gP5X4j5aQJx<-&hg>wcMz4 zyf+BCL@#liKRm=bEx&yAu zmz*CI`-N*Gy2cXwE5TRip)<%M= zbA>^3zPwrG$T6?_LeN-QpcWMLWtPpc{(N{CIBfGRGsbL_Uhg`h8Cp(JUfx)T6|Tp` z;q~I(g)s@nbpf9TC_mk8)Q{zEO_ZKCiCqW$*Tj3g?wNR3o~HNri^9uV5RRq4~31G8(&(;$xUwJ^frW7uvix z8V9l2&k-QUD(Ep3eXwk=i)ter=UE2#b`)Y~E$p%2IQdF}Rza65{}-Ad7ItPvUY0_~ zf+g~Hil1iz6i}PbvavHNDXXvHet`MmA;6b5781&&0_$67Y2sLop01j}j?;N?Yo9TAP9_XqycPZFn z+HcW>F?YESXP*seECnc7L~g^8TW2(V1cIPFT1lSUflVf$osBeWj>V;SUZT~e(8NlK zVq@~)ox|xwQpZgEOuTrzHrird5rs0Yk*TQ~#A9;DU)yJJ_uDFINpE|CJ*8K$&G%I@ z;6so;Bqk;YZuVqQ;Leo2DFkN*N|TT7U9B@2Jp)S??@J&|LzS0mBIG%7EUu`~HzYew zIOmhq&(yO|m}xpedcwvswmsjSw`E^2J68Hi&=~60Yi2uNS=&iuM^vOU_;pZ^c(Ang zrZOyuqqKB_dOiI}`>EOX2_trJK_~FC6DHWJwVmo2b?mznGpp?|3#&UT^poM9(m=5s zas@>LB8(z*?jP(g=Aci*Q8^Mg!>Lv_S&ZI(#^1#TD z_GX7wv-chyOfv38nAMpLV< zP|q@;f(Dn<%ztPvYU|b5nCII-(TNW`cmV~)G(NWMkT$RQ%e{b3!gU$HE;zvY!g0V` z+Gt*N*DEMRlYaVCCzH5YQ{hAh{%-++e;GE@#y`i}j)L-k#B*8z_-vh${HQnm9zia> z1-u_#PV0}|+J6&yAiMdqVeD51eJ{q&$Mg;dD|SvuUd-@QC@JXQfMV^@Zh87Z*>fhE zjcNDSgMn+ggGN`oHG@?rgH@6#((r04^>z#5^>$=rwhQsq3+|Ki<_@Yj>BnZVnVF~Wq{UpwUEdd6AXto zke-Q58NIJq;>A%rB-Qpq6r44N;4w>PS)WSg;`x^y2k0mN!hJrIATK6RMyk_4wVpY6 z$+lLWZ6UuLV=^0mw3Wia1kw>Ux1HG#(%LT`I}7xD^;NY64NCMRF`(R-qmgd(&;$>NZT4ZY3eO1z8tZHHv{Fisr zu1K3TMJM9b;5Gw+1cCfdA#K5JpQMveRdK$WFB{u;;Hp2nm|nrV?*GiTqN?X=-n4dP z>Y6<}y*D6h>ie;cVmtJM~cC6c|#*6*2R=a-@Y$UZc_+a7hAzPsQ8ivs$fU=6Q~Pixyu9!R${Ej`j3qm z7Y{I=%%BhJvC_wpjXNC@o-zww3l0XjpQyZOg`ja4htaP=f$=JIj)q=Od5NH}5}nt= zpln(c>|Ynzt|s@Kf3)98-FC+9rGy-`mSIUM{u3peG4io})B#yKNWbv#J8w?{0$5n^ z-~VunL%t7oj@}esl!pe>OKpc9|GG~Pt+uQoF_SgFZpzk+He)W*)DMA@y7?$d1IwW} z-IbZEnP=BCuf2rJTY(vo5z*Tc**N!jsNs1Q^jkJ#wi|QtmgOAg=ROtQhH^FMFO2Kj zJ}b~bAoXX!@oTce`ykoJL2pkjCB4{$-u6_r;#Ju5v)5JHlt*{eo0kX;N!O_{2@Iv8 zsA~T_e15i!)PNJack=vP%P3F&L@)~UY}Kq!+&3&I zK3numfOxr|w~O(|`P`^*%2SHQkSFLfGE70Mj2zfIX^LW@U)#+am&qtm4V#o<%P&P{ zlG0f5sWQ-9Y)~N~!36k`k$?N6bBv7q!tl5xIyspJ9IN<*Z5ld0)Qe1To=x4LlEVm8 zl&Gp9m`o!>yDpeDMt_M3DwdPDct3^f#hRkS1V`2t^6qhod`1`MS8dqP#|+4Y6s3?KRI&Ne>H;%#`l~9kz5K^_83ROOPUojN%E3~c@55tvzpINGTg>Pv0Foiab1nR>ArjXl){UjPX5HSwhU$0~RVHQ7Az1af`GNy27L2XLaD0FG)+O7PbQgkkqQa zQb8J0P#DsG7iM}A=IIz4WX!k45r=>oh4tk8P?UIs-AQ39cFqA(GALW%sI5WQbZk+F z;u&I}iz9V}F|vaU6EqZhPBxy)9Bf{I_>R&QwgLHWL8i1gMAmn-w;3Ad-Z9o9w#**L z%)|M+n0n{InDK_Iz7d`OnG*ebU_yegy6!07KT3O9$VzHu!g#f;Ap??;x#>-v5@4Wt zpQZA?*Rxh#qyv2Lsp)x`>`Uk=3>cDL`jAjigmbF8G422OTlow|jdSh#koh+4{$cps6|3wUL zPg zS`ZvkK2XP{m`X8t4$@21&r?oe8F6*+(K(?R8XbhH-g!`%JY;&oJnJKt4DJwXy5prc zW9n%NtlK8L2eRF@imaAvI})E!-$;Ic{NxT2r0q>hFpLtOt`W4)v_!0)Z%6YN$iae# zL4ZxS@ji(*+QXu9m9?tP$r)QJu2zVk>lx3QyxC!zGX#^yV8?7yac$-XX(vAGM9JCJ zgl^zKu3zf+Dr?Tv)HmZaOOcRwUKTu(2`W9RqofQSu|8E3{YkX5OHj;Uu(*~_jz~3C2}=8;m>PsTk}k{~ZQM)a=rlh{RC-$3 z{=^}Y_e#!5YhXr<^d3qeM=`sn{;Bdpg!b+6FWOI09#x*jvawV@Dcb`E035QbuCa@ZY<}H<32qe0xP>}IjD%c+QhAy0z$gU#( zutb)+fQ~r95|%=vlcWd~+p}B4fT->PCl^HJ;KJeoAIHe8)YYvQrPJM;_Wc<)1u)YL zQpc~$V2s#bV$Awr%tA!AAx*(cQNsPl$J|WR}o> 
zsqZQmMRtOP?Al*Fsf;4-tAons9PV|xN~9R*Dwt)xC`wl18%LhK$&fSZ*Q-dgsxU=kU))k% zv$mg}OX^_-fgWjfY<5+iqB+Nh&2-B{Q=&F>q9pq|pRp(P5dLxFd&PC)hK0ra=472L zl@V?dLf_c{T4bvdDE^gmvJ&K)bnHCh0u(- z-}7W<^U$c(qas6T@8?>|fZ888G96KM3I zHSvepyZ9B1%b&k?97V|}hNBI6z7iNvh}A*?ua!a%Vw$pBaQ`eH*#blyOHl-#T4XZ9 z@H4s=9U4YU;KVisj-@zG65|r8db4&larB75V4K_ zu=-ZZ#p8p$?E~j{X)aW)w;kLCPtAKJ|CrZ5Xio=T*6C62B;7uYAU!Lr+%FwEpb=Kk zPp%t9tP4+w;E)2F&{NK(kIsp1^uqQjB&644g49=Wuv)_ocOUikUbEKzZ zB^TkTm@pytgFRaZKgfq@hm5jKJgb_Rx9b{pu}lRGksl!%eZ_2>IB%@!{u9W+1a!*o zk+uvaN$Iv02dUQ7L;pk(HDa|nX_8OtOmB1>yhk6(%b&m5@;sEpty@g3qKZ0s2EUf- z4uujo@FNNK5#dm@{9n&}|NSH6jV?+iNHFCD&^DLIy9TDC;19X;XPz7{+xo((se1vJ z8Q?i@(kV)2nnS!r?FZ^rJd-?r!*@tx@^@Gc3m)YS7*7=VO2uy5yvv8r;{ySRHNsQw zt?k%`YFff^6$FPS$kxzY#H`821SbG9S~(EoDMfEb7XorLs?_v+_fEpl+2 zZY@*DMnBpF-om5ukpx!a5tvabv71l-zBQW%1@OhgNV31cq?v>O7=gr-od-nEt?8Uu z%=(;&M0Mn(-jC5t0N^3e_hp{0)>S>*@7|QY`aORBCz|$xx&Jl9qxf!rLS1uc9QxK# zuBcCxW$>R&)g04wnX2r(p{d>F09l@Yd>&SoJ-C1Bzx8HD5B1M}b}f(+T$ zbsQDNV}1+K{~>oT`C~bjQ@LUio^~@n3dP7Cf^hRpiK_3jIvLfE1oV$#ON>T~qx0&W z%e8uy-aJ)$p$QAlgyv;(l%AY6#pk37L|RxGar0lEU3>GGhk};fGxj2fHFc0Q*Fv0o z_CM*Da~Yj|F|hY;rV&mpo*1_qG9F~O+A$xHF*=^aZ3*Ts#Ej^GMPYiK@|4MC+NEIK zebC0>jy0pWmS+DAGFD6dX8&wJkN?&=N}}z-n%hB)Tk#1jbo2`-FO$ILfXM>|4_WEg zZsaB-fHPVR*cmH98C26RQ&}J^9{WDA=KSzH^~C45{MVddap&L7d?$OuXiC` z!wtQgcLhA@9G`%ANQUUh82i*t6CU2 z1%62x3ZKvHUSotqiVWx-QG|H%nHiQC)@|+C!c*zdH=K`7oJ(57eb#qNM{#xcCw4OkJ6BYeM0TS@4juI_4~!R@Z{|1hxQI zOP|lWXbh5`$_+8mYryHVPStm2!UbbB|14uQd%;E!6#LAh#lSf#&=daqNC`(DCdw?U zmZXH+6OUXoP*E7-gqtv#pTaq7Cel_4cz;b7^;Bn-kC087a)b=K}F5=Ni=Fusdr<0P? zm9B)R{1z7o>tyb3pS&QRQ9U=R?u8*Drn96S-pnp&(ejL!@O6n#>E-*pPbEw$3g%2l z90X=A^x*5slJ0kI*77EV0Mcr~A~ZT=FNAGOwAGVQ3*Sdp^x*MrXc+4)uXq+P09ewh z&lxgCbqFA+Wf>OZg?!dAdkehkW+!pqGNWl?lad(r%O<5T&B}gXB^S8D62yrlf8;#A zDOvY&$|FhQgl0;+egmpeY_CQ&>35-p)f1!Yh9`T0?M%3|nI18c@E920UkMJ}d>5 zA3j+PCa0meCNYp8jIB8(MmYyat41>lI8?Tj3sg>df5J+}vniX>usCE)PJSvaofpa< zn5-N1>By1m#$tf~h7#8!u*y>?_Zb%yIO%R=rCEw=$EWlp1$1z&ZVd3Qj+B_7*T zaL(`ICPN3q!ni4c*|u;$P5$nJa|17g@RUi5F&RvdQrl^-hGDSkx}BKc_KW7#V9TX& z1I6xl2VvuvX2;-hZ3!N1^oVEVrOI34$gxzg5(^vi3aSx(g2+x4un5PoVGiN`2bvP8 zcz9eKwpBzrjk!7EY8BHiC><)*fFwML6jR{9Y%`4WEO&RQc%H34iNe2}HaaW0Jjuns z+X;UZD~n}pB#tokEGqsjmm9i32CgZAak?U$RxI|ud&Zc<7}=3z%J$c;?l2TEh}W2U zsohw{Rfq&mCdauiqx$FHG$Ipdq|(4BOhqi0M!ZUnCBAdYd53E`-2;L1VWcQi-?&Et zV_lv~8*in~fI>hgh>)NIQ-0UG%kH;`ln@BHKD*MbCs^9RZA zy*xd*6NlH?DVE<25gQ`Fpoi`;HbI6qI4`9(?YPNk&K_fnj3sk=29iS*qVAQG@Qo=h zv1azU=xHMN^elkzYqzeXUe7A-wx%3t=l`Z#W8UHE;)er4wwDek{w+N{)cT8*^;PGr z%=^ps1ao+A%velXjBoDsb@YLM=iSJL9;7Rp5otW9sAG2gQw z>KO%?o0gvz@D$n%VHq0+2YZXu8cror7)LvH5Qwg1Br@*uQkl`oVT%UL`y(`_IcN(H zO+7cfBA2_lYKDBQ%4C+Zono`>7pgH#p}}23`<%)(qBm1h;fV>}moGYb@Ec!JT(LI5 zX9J>wzakFT3UN`S?s@aKHL7vcJ2BDK6D99T>P9Eid}pltBPebDEC^&)I1 zbq6V$@xAL5NB0||tU(I>$jfKE^z0}vCqf*(3{MznLU(0ot*L#EKT36BetQGzmsFLf zJ*0`ZkcEPnXe7YnauV^=Ww_8TbnR6C2DC2qbGA$c+U>y-cFgvw*mqQ#DBD!+ zi+SXp(z$|1j>}}loq{73>Vrhd3ThR?Ud9<)=^fZ^Nr=KQBWpZ<|=Y|560`rgLAq*e{1hsIOl-W>ipTy)@cvaKxil~*)T{J@>CCFFvDR$oJ!Fm+dBGaJ=*Tx=}m4Kd~C1kAF$l z?xj{};eI_{0R2P~##-Syqd7gV3GKkrW(@SL-60VA@&d*hmwIi2J45J9; z!|(@wAJ-V`=lcj>5du+>B3#OBpLs-Z4>+@f^_w~@ak(iaKxA*oc)ey zbA&CHK5^1D{?EfklX6dDpR+@SZ~zv`7A**4u!&>JqN+E6z_Jv}%PF*jz-4;MRG;eN z>c-aYhj0rJL#YnXxreC8su?>1$x0b-D1r%@`+0mHTPK znw_YX7~_TbVx4n&6CcgxN=?kin7eHsq|p(%6nB@qi{qNtw@bPns=}h~#9Cx<% zE-x;+tE$fSW~;~s#juUQ6vtUQr4vKK!e(oDshOjrgTt;Trl(IQ=oTM8c{0;rW~89M zx`LtnSq2HYy5^?$lh!0od_CXSkc;@R9e>x9iB~ zDHoaevR=yc*somO`eEthdj&(~b|^vzQ&&5qFJoC$rs89=GYaLBa=k6ZkbI!r_m|dE z`w3_bx`R_?nPsy~B`nvp`Ww2+#I$!M_QP{1*xdP|ChtoIcqrg6J{6>=r)OocI1k`z 
zy}G@45f345|o=Wf)$t@!L&FkWkq{QBVaeDR%wPP315N^M+=B<;(&%;ZHhjDnFP-u#dPTGRi z&zM{^eFhaLQVd@5%HKR&s)0}~0tyQ1lqe2$Zm-ge$5i0b<`yu-+P%%{td4dv+%8|q zyLXR*RBiT+z-pwNmtur%*D$Z$%Gwj&pz0mVp4l2ye4XOCTQEbEkr%$Yth4YsZ z>3hpui{g%gy}7@%OtZ7k`6{aFXjqNLZ)ySp-FKMu6yy}v8r?YrfN5$P>d~pGe1w8% z7prBusSyA?JXLKOugjsem)G0harN1hD&6*s(S@ZY-A>oevX-KTN){?c`0*yL1HBW;op zBAXp9Jd8wng$)f=RaqjqOXh<^gIjGbJl#7r^Sc_2j)P<9w0(V0**Z8lIF^2t47HV& zJbd3xH`f}S4#|YfXAE_HeB|WlSpu|$K^HdWjA9BX?e7k4VwrbC;N~>H!H=Ywggt3~ zudaT1fs9t)okSHf`-!3)U-nB{+5~ya@7|C_x_<$#}Z`_Q>k7n^FxN$<5WgLVP}o4ve8gh%945$ zBnT=71|r+?%t~64*ZyNRPiLo3deHMQXZ8=mv3uL?E~hs+Un_0ev%r1h zDu4gs1CfK6wB-Lm!6`6-ruAJHD7mBQZ8k>Q;h_FAwbjqmd zsdaNx7B@EDr@Xq2a0yXy_8i*d@VL~JlwMy`^^EPdHy3>_JYCJzIewpWm zEj3~02nZs{PJTa{(!YMV2`L2t;Da|tPcK6#S{(Kd1;0RaqDS*LeBL&nknIGwchYY^ zp$Ucg&NsTUM|%)Zlp=fU5O)F;{M8f=DIZC^Tw8Z@V}<3kz}Z+K>e99IY@cfAxGb zNKKl_x1CLxtetDG&b$~|;tp*?=k+u^Af0G&8;%na#8uGvZTTUn({_W&gSWp0|aNnJtk3)#k=X_2k*>g%+BtfHc9q%3x-CUOjo&gLp- zGmWbIZtIPg_WFsOnpC>Fs_ONeHa7tA?BwKSb$UC{L0U$}Um5)L_w8-1@5|)D0hdm! z0|r&t|4cq5{QvsF4=Xa3Z~q?;4k(WmRCjyo6$wi>b#s$9bYP-`3Jg!7{iO{40m=hD zdV7egAE+JhNz^PSHx-kZn8;SCez{SPkCYmf3RIZ>LYS?y8g@mBc+F!31JC8Tw${>u zy<{Q8=XItO@O!988Ffk`wzVMSV7R=?^&=H;eVi*E@8ahN zerpUNXcl1IhB3jE)LNOV)oyQVD=`~Q9OP-TL4b$7b>L2=*Gx=H16b|bbN`SYN5tn3 z^o3@E8-T89rZZoiBcH~1r|1#zhsr0;&(6+f{N@$~%fiA!rLL|{>MJEDw}gXn#@#Dg zciKAY^mMt3jgIc~cWg1KbXpM7X1&ey?-=@`St4mTS&@j-5d|p>FiIb@C?6}(2^7IS zeZASP@IIhsYg5Wdm04Dm^Q&RPSs zEnrZ=%UnXnIvSa&u(o{Rzl1J~7yoL=nl5vC-T#(AU=87dXln;P0Uv3Wy+X)CfqCF+ zA(G^LiUsNzbduWfIaBl**<6XRD#X5@pNs&A9Koe{pOdl0vpSZnLF(rwQ89_Wg*vXV zf$arP2<}#X=*ttb}K?D39A?QP)4D<6LK8q%D?JmAX$tJTRR08jb zH_AIYNcibR@2S*Io*4F@}!ld8`td!I!W|2e`OY3uMiSfut+Xwsi znu+P@iHUt{so%A=bhJWQOF9d|f0witr(hLCi@}}Pr<+qxK&Dq?gUtZ+18)Iv2;ffk zanX_YH#Z@D$0wHre1w%l0zHH^k>S;W?IBfB{tSmm3w7Tul(^g5knYN;3)YO{FUE;rjlbT%e4*@?qI#jQlmBnoIX zScK#Mn1%IFc~VTGd6s$WDVf2LGz-UaYR905LzNriTa;T>nqrJP@5`zu45B;|3 zZ{R`;`9IysNfdAxn15Ouo%jzYYEVH0l6{lHP|5q_eCFc=oQS!s*90Qg|HtFB!_7^` zR34}^f|u9hZbAYL9NWX6r>7^YtK)?;p*$2+)QUo1&eV<(1zw*IpI0iCK=N4s2rLKE zF{lJYMS<;?%O@@qclb0oCA!}6Xp2!19AhKL(|b)4hif5X1eQHGQ7Vz9?xwt*ypb}h zsPNryWl0MQ$r~G);TC{`fCSCoWN2oF&PNk?kjSF&VS#}`iZ>8YkdCzkP`xY=6*mjH zaB3mPWX@EcL>fpSC+Zcbwjb9hR0^!ny`X={GNP~rOPCpF=H?vlSB8T*XTPrzIn&;k zkP8=<7Fq3Nh7$*ZaQhtMdEyx*>`b{{ZUnp_(vpb6ognOe!*O~66dAcpDCgp$<0BNZ z4)m9CA?F~jDpY%8hsXJVxW%7r3Nn-p$BOr(jmCUV7$47Lie|>uuc)rey zt9{j)5@gib-0al3DH{oo!CgiR4nbnYXP!2dW&u7o{mSEBU`td=|3m+!iT@igV(5R# zsQw4-DWL-M?ec{HJsDlSc=m0^(QN**aZkKx5l&_F(;1ThB>?8K!{Y~2k=?OQ{N10%Mg-O9ccj(^&%ZzZ ze1CAUu)tBw-ste5(o&jWqJU`-FKqT3BwiHjZ5X7?LMg%xqlFTv*Kw&x}Np~H`<>iUEwJ-tctI%5z z5kMI?siFrG%McC(UWs=}#z3_vI6ikESTxlNgaXpbcm+|ENiseSgabgaf-F~CQyl}3 z1g#o{^!s%d5(|waZe%Pt1B&_Vr1a^mN@{B|jhIhGO|1{vL9qu}Vk`U_Fd(F;sPV|n z*=5lS#umA*tEefZfvt|mYX3vr6nrh&aNhUkk-_24q4gIm<~~C0e|MB=vHuP(er?8t z=KndkOklyQbdx&XCvz!C0^Fi=Mv1VyoKcxfL?o1A|qFA#7}~-kOvuKSmrY3>q8~Cgh{j z>`A2|zV@4?xZVjxJB94`&Ja#4T|BY0=xh?}f-{aC7swx3|-}Er4CHPRBP_ z0%24d;*>*jN=kCFJq&nk6g0H#T)$LnVPx|A(TKEjqf2R$(#Kd=ciP`5LM8?v9hUu2 z*JR9(`w^k-vXabiO5O zlNPPDyJ7z7jCOw>sSgb;Nh>RxyLFUoD}B5m_hGB)nSi)7>+OR)uBbUW=)o8iO}g|n z`63NX3i}@vOqmeWgEsedS<;N^KM z#FxU;rlzE1vXauiVGzJNh~S>ZyMfjOFGz2(RqzbZh;Tg2@M7AUzBN@3%jb~ja?rtd z3@uG&{-W3IJCK5GG&EkSkCWGFb&Zh{Y6%VtseDOy-=mc=qCmAvV#CYJQ`a~Nggl{& zBOxWp*Q&6pkm^iMWS-89N*D0+H8TUZ{xI&#AL-fX@Wmd zufm5^easWtZ(kwgmMC+n|L0Od1ZW0=h*xyqY3t8t^&OLoNEDTnybo`G@!`pI#O(=1 zEr7x16}P51b6f9N8lZ41U^yL;bwBqxVGZN3r!;0L9!DzvqMyu4$}%w(n)4}{$bEX) zex3C10U?-!zYL{Y&}O$gJw6sZ>uq(eDbyNilyS6xmtdq9+7dp8Z?9T-xx0x98PCTT zXTKN3p^yxawLbdAAi}xN=hR^B{B%!rDJi2dqEQ%I0UIi~f{Kb)5V!aI7Bbxl7SLx> 
z7Eoj;622uWK*k;>BXv+4(T`e=_gfg&F2%jqy# zp?YFz`F-*`2s#exMC*A0DC~NwY$ki1#}de?<*`?Xd$a9UhxN(a<0Css93WU}w5ab} zpMnJ0i}=I*!is1MvM3B`-z-o<>i!&6>l={skk!dkl=!zwvlB-N%2SN6Pv&TnLl0WK zL=@qHr`b-w(MZfm8qSgIa5~QuGYkUiGdx;RhK-KwTTjH05&g0M%1QO~7|CDue6t+yeB9Y5XjWKlk^4I-K+;CIMV*IY51VbrpF_fR_B><;rxCN`v<^< zh04kOXS4&#xK_)7ow1*R^XxLOfyqObef zsx`L>Vxcg{I2|$cvC%Q#$;4UY5%w8fo8udCF)=-LSjzU1Ph>9m=O|Rh#QaT0>FxnH z5EcZ%Xf*~C(<1-;a~ifdJA{)AY-ht&r2^Pb1??3A1DAK`Y}LSBb3Q+xpJ_bc9@N5; zJ1<* zWNlH?X=Xl@-z}GrcX^lv1;fdqneAKm;!{9AdX|=kHo9U%) z$uG~wm>VKMocX=>Z$m4g3{Lb_3Pea_(3_$5CfEEQ{t)>Y0#Mi3^yp=r^Jv*5=zO`q zpWSh?1;D#4p!>$b^vQaeZP}3Ed~#4e571z@TMOThc*>X8B=E!8pT72$2&;B_e&4aa zbi+Ux=3?#0lz$9=(KszdG2gNqxH;k~K=XJ$LFrj-D=C3m405$Ny}T?hDwhYG#7ruIw z{KFb^rdvMVMmV;0rj(F!Xyfelb7HF_bK6SH&X@q}DA|iL4sPW5A(qo&!l85hdWk#3 zuoN^3|4!Bm4|ymz`YBH-$Kzk&zpE*)lKjsbQvxjfh(4Do)6p13G704QdLEbQr7(!X zWPxO+krO{khygMrU;|NxMAfZnhlht9f!=h+;nDLVb+tpKFTgP|m>C5&xGq|O6YTPj zbwyRj`{|<9Cn%^-XQ$X;F(8xOW}}6SMFfuO7@M|W-NVDi=7OqDNm*G%{IS*Pq_x}6 zACW<~Yhz{xD@>G*qNKSwPEcSEf-90BMn{*6!`6T}IMZdRtx<#R5Ri^-uQQd)Xka$xFUR5a9>Bk?<(S#oZ3x4^7Mng<6f+wY;V zik}+l9WixS-ATE*NnSxRdPVnI&8`Se`_52p&M&avz6-Nl3PI7HW%D%M@DBysZFFd= z=>`0-`|C(Im`XHS=%eS$AIo0PRPXZOVrq(h5VzjtBCYi^G8Etr{8d4lsvf4~s5y(3 zAO`6e;6~S?VQDsRFE0$*8LhS#S_giG9%%5hc0i!-%#|P@<_5;WO1)MuD{i5(6C4o|>TA+d>X3 zWfLiDvhYU3ej=|>YDX~>oTPA&U!x8V4sv1mS`att78({Vva*_%T)WnyR3`y|J8Bx5 z0M>}82r+grF30zSQQ#1bnZpB;*`ujmqdR{yj#H=wPOJ)2_{043_5m9(Hb_@&x7Cti=7p{(slND_;a+l;ab+Epw z>T+^QO85QVUHp%q*>q|lOsAPKjrv`l7X#(Wc*Jvvm!Vu8&+kqv!o8yL9q#>U^Eb&81lp`qJ~^1DEnbEc-INQYzl<#+$^^+iVM7z@0; zXwTpc=oXv>oKe@7y`N?#tSIh6i>f_Gj#X$l4)J7ZI2%{pg-rWkfR}fDer9smxw*KB zwDp#&{`%Eeqg1{cE(<>K+~OWcwQ0LtzfVcjq2d13BWQamTN*dr##Y%Iy!dfo-o0}l z{0xDit)vAN*W@-$FRHrz@rksiwH|JaZ{PY4nj1QeS?&(53ibWm&HZ#S$P*wplc@%0MczWn+U^K>XO-u-#>q-Hkt)xG7;ZDHZ;`L_A>Sc|(%=XsZs-NWS&j+43m zbv@nH>@|L?p1Xa1^#miA^^6SEFa2YNq(uEUM^qk(kTJ|qYa5-}7Lj)r!z@t>l7VAu z5rKtMc%($Mzwm93DWt}Y)*1`tKc%i!|e zBMXLvfDYi^9G5F3EmYH@I~y~R7Ap<;V~|9HIGE=3Jf zRVDm*78Mccw=gi&$TL%Po9-G?(4J;)Wvz6&2OpblLaWbe5y_wRJu3 z-PS*UQ~+=~Nk`Mt6mBaw`;--xR&^bk9To}e*BxJ<&{=8q7K+z>knrZ!n5ro8j+e?_ zPmH@YmjqR{`+G(n`y^7{4h|<^~(I-_E|F0c15QK^!cAw$p^@WnC`62t1tTA&R2LtEd$!^gp6)1t&olyYBy*XL#G>OIx3E6d5v zApC~9(t0Oun`ubkW|SbLgMp^9y2kEM4f^zAW227x>l9o~+Wf>PZXg%frk^y!Uy=)PcQ z!Fo@tAgka{dlDH4f{g=e5#l~Y6Vq#8d;ow6IahEjug)EQ3{oLFm@?f`Wv zKldiwx=$+Ao{}>)=hC(yAW%d&BO?P@XU4|LYCVff3Ofp;kHpl>Y_6Ma5gG&72;{LS z*qE4^^a!%Cv7y}rze3f+uOZx4)hmIa;^N|JX=&wA?ugDbFrp!XlhM|u-BpjLrVC?a zy6qw-rvPO*fFzcal#C||aPb!A)mN)mJ&b3AhV=&zQ_@pQNLBYYfk6!6wOYF^scXSn zg$t0Jq2lA)go%jo9uM*TO@&@_QUD2OFhkTD{{;$$qQtk!M^653e zd|aXK8SdQ?Y(dt95@`G?_Zc0P;+4NV_}h<0{09_5Jd4MUo-a8ym1Q~}8NFOZK|wMa zl_amO4w*i9%io94 zb}A<^@^$J`ud=!Om7(@-iI6`;jEPRIb|dR9NN7gd>usl4J!f?!tmZMO=r%Qs-sS23 z?G^D;BR?}ePcBbBv$~=3XxsPkX78Sim2D|G+2Rpx>pnd9y+fWenicau^wLQjNT&I1 zWN*a5_V-|A1EF}xH7qfPSQ`&1%ac)Mn!XTZ+@>c+<{9KyZE_n%SmHSd@gQbz1b8rS z(8*^K(T<7MJIw2!hEA6bUM(C>Vc5(Dh=f)EO8e+_zCn&_WfX?2nzn7cBKl4~7 z?9<#&Q#woyh^Ua&Bz!dYtgz}8Nl^<<UrobGO~ z61AeC!u@wFst9)t%oUJ?gdM{7$vU7h(WOMY4|1j({+S zuCDGxK=R`&@-drCKkLTiYzHw#x^>9V`>H>^eBcD;prk&hK+*?K*`3z&Gke zMMKNi67LxoGnrE0T_Xy0+I3h^d8S!k9c}CS>L*NUhz(;kZpt=mbI`yQW#Wz6@_6zQ zRhmHiyYN|>GnbuaKE6320UQj|{bEc*kVJ`?=o7o4*&A?rirgjkG#5lq z-5r$$XOr6QXK_yDJD4#~5r-Zo89+Snw+&?whiNQ;FRL!Ec#=2UUp1xpmZ)PZC9gxK zcsBzNd1ZeDCjAjcc31M#!Xx^=H*<`@9m@DUf#*N{g4F*33C+;KR%*@6=%=PSL7#+n zkYH2S+C1Sv|6oOimse=j8)3g<0uq3Pex6np)G5!rf+KlK-4;Jqxc~lW3avVmhi;1j z=on_7$;tr-Ygx`qTVD(^2^nr1;W0eCsZ08#Knx2rhFP_)RScL^K$xwiCN_A_hz6QM zF$`_^;yIK@SpNNm@y8(~$AL5FPSGz0{hv&tK(t+y1z|FN|7q{Lic>qj2ONK|!uO@D 
z*;ac=0oHD5+R#8WHdVu=N*oK!uHjR&*PMnl%6xIolT+`zHi9Mi9~PqHHrmZ}S}kP+ zR_Q|}na#E+7{Np#>3Qb6j}xChC}u;RJ{X3kKw3FlH=B1L51L@8y}2kj_~76G*m#ks z{JgT*?sfnd#=U}4ORuA-v_^k^g&n;e8)wk0JVa*3UJ5)vUNz6r>-?!|EMn|qy#H{X z6(^siH83e1VERBBk{=Yoj0p1vA;=@#H#^(=<0{5FoJDXuMv;WS0mHJD#qi zZ52DJIm~U>>M6adAg7+b&`V0lN%ksz&lXAb@_g%lS}MO&zxo{Bc1Kh^68OB9wA24m z@2(4zOEYy4+e}byJ*vgb6WbkN=xQNn=g#8K)aH@bYw@}Ksb^^_;w|%>^PSe!P7?#8 zzE-(1p&|7)PhhE5>v8sc%nC>iP_uIV-+Sb8(MZ?Txde58khhCu|LYAugbpg(3 zOn6YWTracz&+N*;%R86>;pYYKr;$aQvs4TE!^!13!EP;OF5~Hm?uK2T{pCi3<(|8+l`6V{uc- zFT$96MHO~d|9N6CK&t*JZf1rd|d(JIZXudp=e12+RB^v?CM`53c9CZ*BzP{IQtG6Z|21R2_#U6sIt@Mmfkb0-q<-6_u4CQP`kY z6aS2M6J1@u+hB-i*0%oIoWO1~9cJF%+;Je~PRZt`#(%?1QP+4#dYXQPe(rjF|CJ~B zr45CJ6!_@jVpbd0dujh(3H zYVoMqAkeA#&*Afnb}7SAz2G@8E1b&Xt|4dLE&Zikwi>^D)Zy=9OfxJEk;0a{X&BVy zL<%oI*CB-%utWQ_xsb@-^vUI^vy|D^DQF|<4|E6%sMkv1>aHw(DnwhIO&~Vx4M=gB zL~DaD#HrR!pPGhYl&HFiVz9w=c$ZUL)b~D1{ce*z3ZwW}72)eTsQ*FMM&HW@h;0+f z%$Hi`%ud{abD|)7M||BE@*WbbprKOtlp^sI5A*(k zrv&gSL9B=g`0Zfh$z3C33lTy*Lj&Rc{)+e?5rVk9LYEjA`Jf&<-YZG{_y#6{fWVG% z6N@3@GcvEF?3Oyx6dvUNB~dF9Gnp3TqguHkXx>XPt`tvc1!+0MqsV#F9M z7KK623)BGhib(vT)0@D>x2*7JcmK=6P+VLRZE^mAtc_jeG1B?*8AkYBMt*p+35ebf zKiZrvRd2TNpM4t$EX)1RBkUA1}CCIs9(#^sO~g2g$`zLqM-j1?o9lAebV|C1V+fwcRfZQ!(Q^IxD+qFH%`{hk;yP z&DA~2k1u>y%~j(Io=IK4m~B`WUbj;T(h#>DkMD=zUQ2zDf=@(|0q4hOxPnG#C>Zz9 z`d;+hB*|2|$7$$MM{k_?m^Iscu7^X1F4&9|y#b)9zu~^R{k)&5`ATnYo*zSrO1?Ro(AzZr7Bb^8(&3e-mGP zz-y3Vb=Ny6`e6a5=cgZsx!xXzKaE^;;i!DPFNdPumSRQ+{b67dyI$-F@wiiBh{Agc z4u8p+rTMh~sp$H8t`SpQG>1wTmgU^KJUFr4q=!Rr8Ai_;!5TNmL zwf(+2Ex_6BHtr}MHb+zW^L-8T>2kX(ar~zGiXb@m_u+MS_no4$^bv6B-g4fe=iA%Y zFo)mGW^V3Es{>4WgScOpmbsMBf8VJH5P@{PR?id!tR^t$L8PLTl$2rlYn+SP$zuMx zO(KI!Ws~gF2V?{2FfiwTJS53+eJ35Rtu>onA9hKH5efLmp7`E1jf{-q#qO-q^>AYF zMG`nGA^~`J;#=fK-q+xVs)Ao;p3lhhnZZu@X;*ucj(<WPKS@S#a!H^dlU3~mNYRLx{v{7r^BXJaZ6aOYdk2Deql3UdLq8wY!g7#{wq6%1d71sa-)@2 zozC3I`SUi4!M=6081lBI&w8VU-*k%?iqu0{+6t~}J|?qS1M!e;0%6F;LgPh%_Mc8m zO~*|0;2D6t1I}UC`4Sp}IMf==NlVQi*&SPx(-z9URi&8v2e^)-xWJxB);)~Rs~AK= z@7&;!d~gnC4+ksq8kx2D@w$$v&rsZ3%YLu={f9-Wxm&GNHl}<->XTibxsf+xhUpLV zMcH8LYOO>ZLfkG}F>Y^ILYn4FwH@dVDmEBQ4<)!sX(h+BbM4||T$_MTDHug5cD_-y zaNLrB`yn1Xl%k=)WC$lf&`36q8!d4(dzBs*niz8Y&7nDZcYlQE#X_xa4is=VD*sOh zPm){0=M0oO4Wwo>(+;QOIcP%tx4o$KEk17;Ve_w8RSz*$wV*JnJQ&Z9;tRj84-o<1 zs|jF_x_%0pcx>i7cv@T@i62zR6n*qm^_J=9OL&1)IX+6ej~Qz2R>KTT^hnJa0~7bL z^)lxO#7X7{_3gUA-}HX|t{3l2T3c0AK+HwF=HQo5!S?6de?i#5tdZw3BA$`PbG!}E z72)eQdsUakINw|4#5GjUp6T&4|68BV`tlpN+&8-JKLP{DxyQ+&Lr~sS2^;$43b@NlMpA}t`9W)6_;*p!`FYBnim(@4E8;` z+2gADyYu|xk=bjaFwOsY0u6G6 zdU-+XU*xG+K%s-P^eK`dIVJkSG3fe`2yy3noQQSV#pgs!3Y5pr@*|6ztVr%7AF@W| z)G0h9O0WeHOM7}|+r&L@58Fo9cms5-=Da z|CLvbsmEF8az;@Q1_mBvF+{zVoAyU&VZRk zvyb~3#lYKaK20AuUUeiQX)yw1{R9j=Gki+Ej(~!RvtN@YaY~z^Aq9E)+Odg8xcd2_ zui~6mYJJ38cbXLZy$qeteN*^6lC22Pz^GdseC+a@`%pz700Wd{|HEXo1`w%uKj zLh00$x5&QdtXCBI<4?Rv)O<@`%2P7D(_dZ@H{#m_5lQ;488xjl736vS^swu6w#?t~ z@zQB5E!pu_W%pBzu0K2@Y#EW6b;pY?F~0k=^vUmf>ZF;!E&SsmF~q6+(@K6)K6Y_q z%+1&JJDInNaH*1U9N+yV}^dvS2u$ho$ucbgZ-H9a&mBvH&urr*nAbGIKOM=c(;g0OYV zQyke=45h|lJAzNa@XPUlONX7at@YH!>E(r6>+yDO`rV3c7{$I*TW16GUU@@9ScpH3 z)q#PqHAHTq=Ri&lzi(8Rw$k0#=Yu@!A%q?%7-eh&t53}a+`rJG>lr|NKw|;eZ6}h4 zfSs(a?q+K%hkc&G`cSj?BV{lHijjhjjxZM3X<8V81qmgIWqD1F>9pW}MFq|8xzo;c z?$l;s7cW7MU?^mVnfdj0r{ShaT;N}knHY>vF0Ik)6o1{~k_vLoDfF7fZ-B%1Ic8d} z!>*~RVY;ikH;H%nnDVccXj^-ig{0( z^c;kv5{(tpq@sy{8N!u#M6YaD8nG7NXM~ZrqeLMgz9ej6Mpc#VxG9f#1gRQ#CeDo9 z-RCw$!moceEDtNZCE#Qy1(N*0N0MXaDeLcrCy!B19lSk)+8c^Qd1(NNn6R+8ykMB7 z=FutGd>)fW25$rnBt)S$LY_d2CQ^ELqYRDZC?|vy${|wI*0^F9SFaF|UR3dNvv4Vv zSMLuECTmmeK$`6E+A*Dy)klfa$j3GvLAmnn7?0=Q=&-U7AmNmr=Avc_5ZNOANrr-~ 
zc)~}5ro13yK{bF|k%w}3Par+Ug7xnpUk|1f%YF+e3rAK)XV!Z=Y?|tRn+gLc3VJ|3!kxjKfGfQNKu`i(b7=!IyQN1?slWH8Vr0HMVjF)tT;J6!M)SI zs*xR^JEAn$(AM4-CYo!d^|(tc$dB3aeBSWA82kA#7jzCvpk@*^(9@@-yRL{ z=ccsf@x9zd@uOcjL#bOuO(!Z>}^eAL`5;N}RaSDez7p;11^RG9R zvcwjsZm75XX&XwW1vK4+v+oNWsGH)6A5F^Yb=zh?=IgY(vaQ40ZnZPBbr_9H$O*df zPt7_YI$Wkn3|HZqqyI%mXZ&#v;^o<RL-L z_JF+nnf2$4SG8cqmPn#`|8~o>`n9A|U{qRwtcJNvRw9}r zOT{**rx(=y@&L-bWNchpioTwPQV=#Q;dfJ21oy6<^XtXdmI6K8r^J4X@wVvN&sl*D z>Z+n>kl?uZn#(S>@wq^m-$4s~}oa6z!1WtboerpA$^?a>OqMnJ8 zjC0l(l)zXwt~*F;tJ@gUvXmyl$Vp|u{MV}{g+fH1*V>xyX6JkUSI1g;M7S^RbcE?% z8)@#ob#(N-X>)5S)*A{HJXK~M)%{L;ljcS&?dvm;V~5m3CGXxTTs_>kmSgO zkB|S|ruvz4U>GS9fMt7k7&GyGM&+4$)3YZ%5+uLGz4Ju<=mlfvms^c!TCfmlbq|%#A=0U^u$RmQF5Jg9 zD|I;?TpA2PjQdK5R8scgi*uFzqYpqr?R{*r;Mycaryos%$G;A5X*>li7(w$a+$KR1 z0#2LS0>b^Xw6vt_^<{dOLWGy}7TaB$l{uUff&?glPb>c?SW&L6syag>x<(SybKyCCdzdcB*riH$=PA$8;`wQzX-E~5dE2s{aoRdJ~HE+)l9eFsMsS{Hl;ZrGh?M@ZJMrG)r{y>3=Zd#^)HanqaLp2HOk ze!i)04H2pNEc|{3q=pJ#geR@hoqjpqA@+tA z78a?$tE#N*e|32IZ3+(lk)pb2KAmmOFs$hk&~Mu1DQ>d&;e$>h!@HrDtF)7?I2itX7|P_Lzxh2UD6 z8%zZjpeL~JX@8)6{%Fw!zAt`nsd~MIv~q&s#1g~#DYIx{njOM@GVwRg8OcS*I0uGD z;zZMtf0bL)zhDdN_PyU*hJdngjLeIjh%-`aS-q`X$Z0TI(D|D?BnBdB%`W#(yNHYrPFbgtmV1<@I1NvnE3H}`}fWLix}!Rg+ur*7d-+18V#hjjK@otg=au&Gn1 zjiduWVDvXhEpyRH+G$&B+q2i}XEtZP3Gb7FgRi$`BmMobqNJLnAzzc zQ&RO|VGwC(s6VdzI5BJIZkNmDgO*ZM)GThrO7w{zN|7wM zzgL-nRro15siLA58Ya&wGn&=iyUyb!cJ-odde3>lZJ6_xyB1~p7pR{Hx+~iq?kJdK zs}jrhoIr1gdRn*Em`sw#~h=j0n!-CN{7UDhFr&L zW8<`1sg2}mI1tYCw7*1_aP9Sg7$i#gt|Jah+Q*n!;x}pgu@sU>+imJsg*2pG=@Pz_AKm> zM*V;k51o$)=~ZoRZA1$R$<39QF`+W+$#5lBepuf2er-%`+H)zu{rXGj7g6i5MxI*8 za6Z14k)o-$D!511784h0|6=aos6Zkgyxygr+8aM}p7!#L?Z7`Yy|c@Lh4YR|doo z(HpvG$nERUO(akSIcezV% z4{Cw;_3e3r{;g7QI8OIv|0ATY%O?$nx-(BTXz50eO`|3Bo!XLt35RJ9-zyx9@jvN| z!eo;R{s;fqzk8`hgld_zGzMPhKCXHFn7~p7DArouhWv^qNRm+B9(G29mDmw4W~Kj1 z7u=aA^{mL)|0`@mda{`j?^(RgeTF0J&L_hWhv2(LrlGlUB;=r6|nSefj za%wu7-vzlA?4L0wo71MzXef2Zc;wfZCIzP96NBkM-Dvc9kyN#CX3L6(OjJ_R5hI~d z@`$;pD5~k4G1`uZ1!`mt(8crD91Tx~Oi|klR|lRl@!Ctsllw%M)k}_5LfHxJx#ws3 zBN-bE>_7sh`{8zOqqsuMn?n-5tuW+3&Iru@vKzJ^FrMx!A6vW=9ly% zU-XG(kt*rUeVS>5j{+r3U`~Di@g$xfUNzLtR4?}g=Ozq#usPQ*gGhQt{7q`DmFlEtF)~d5$JOV5wE%%VCd51Y z_r2wq3|QO?glLRlKAk>WAEdss^#W=JU~jHPw6C%RWBzm%jQFC&DEUFc!DYojU9jhl zF&G*euk5jc6Vv-20-)g?)4v@y7zzRTa3)b^PL#}nK15o*Mk$C0GF9Ol@OvYpZQZr% zr_1ASapmtgoZxJRVlxX+2tp+J289p`;e(^)1I>z%2W5W`x-LsAO6&Cb3ddxG5@dxTj{ns%EAf% zKlc7IEXuBH0EQJ&2@#Z%p+V^on4wd;8M+baM!Hi#y1To(8$r5Z=#~(MmXdzY>(V>E z%jf_1zQ=L@83#_;`&?_Uwf5>oI_^Y)p}6TD_pkgZ!@t^r4|N7JNZ#oqIUP=278y}P zL@t;|pwBVGh$8tjJT?^HwiO z1eu(+^z`bcV2V1vf&5(z6O((r_phpfw9>atk;q(sEJwuiDv;G2o z+>}#O^Q;RC)RE6WNg_!qW#Xgx%t(N3j4ueC9c!3?X;M2{{hMGkF?$5}SJx$Q*`5A(snlqpSO7cf0b!L6|(ClRgIOKljtFwH^fL2aV ztd5ja5q-w1V%6G8Lmft7(>KtS%Er8e;;nuUx9%|||5pv{Tp*AlQGL=hSoEKAz|~_? 
z{v#7RMX*QNPuRC8)P*YRF9@tRKil7Xb#nIw$3#b?)_My(3x(hc-at*%E~DNg!s_d4 zUnU_cJVHkheb6odlOJfgwB|fQDxfEM+^3tv8T2!KMRYh^4y?%m zx>w}Y;S?cQ`U#9@c3?6o9mj;CoNki%Y`p##QH*Xq$Ib5w9a2 zM6$5kon?f3T5fG}#ct>K8dTfNap+X|oVXj>XfGp1#odo(5m#{*W1U=r{C zcP|GTo6j#`{_9~-=-CbI+m(7oap&Ql`URaGCZ z>ny&Hrr*b>-jn&DQVT(_J;AbTpx^j9uES6-6GPVQ2(B=k+Asl10}~a zbrz8HnNhw>F|8t0*SiWBs_qT3FNQIT-DciwzNSwmG*hOEmVc7ttpbj|INN1Yo+;&Y z6T2K?=Li!?8K3P+_i(tXZ@=yWx`aT4V`PMU?`VvmIa$tmFw~vU^47fBL9Ph@JtggG zCupt@Yqcs$W%2`R7)s7!6D-t(SC3C6Z#pkY42A&uW->8a#$U&6u{Bc>=zlx z5#5OUL2|zbcVi@2Aq;SV8^c9%c^r>@s$PZKEK@0?Iti!Wa2YRvtxUM?RSUP;+FMQ?|8t1D}(7wwzgh%8#}<`&-bBx zsdrx<3UeGN+I30O!wN;EC4o|}?5P6Ru=6Q9nWAtr5)xcrsU^3NQmVmPJU3X#VgzmpPMV?8nlG=kzSxtGFNVY6D$n<35XI9| zIddJ@gu8`Z^cJQ|P2azq)}J+0=TS0haIrK92%dRQd#y8%OB`fIktZ2nUO)z#a~$jm z1yP9fPCVa&i8#P`P5rAKM~Q~Zb4c|-Abc3IY5zCjB2@$7+$wZoNeOK*tg`)wx;LQ9GGARG349IocuCLFmJnj<}rKP1sCt}aQ`?ha+h2{Bk913}( z;LGKnK|#B#wxUpUea_XcAma9O$~S3!Pc0a^u;}pS-~wdMRfu>i?(}mk(+}0iJd4Sl z^=xe_$6QHC@+`pjel#LS@Z_a82hOY8WT!Cnp*4B>3|-cN)Vt=9jxK8lXR0BqcSVdy zy`m(-2!UY-(soB|vI9B*fa)#Ya~<_lHA7mq*?ktHRpHsA;6vjKH9g5q;> zpuZ7lRhjQDQ{1lCTivpo3Mm&Z-46K@$0C;90KQ&Zuk*>oa~zHshb7Ntpid(8>yBtwS@V#e+!G#Z*J zG>sToi^I=~c|>P=0obGBrY4EPAO~1>U7qqqT(|}Vn$bf#G9x9=X|UvA8j`}+euj3| zGvPenw4=5_>*jotydpO}xineYBib!oB$9dchPq(l33ig%SbghsiB7%9g`OdazRCox z#J-uwRlH~BLu0!JzT0v1_KQ@u*7}-=)Y#JKF`sRin9)gIsQ(}Vm8jB$1X^3U^}f~p z6`$$3fYQQ(3IujVe7}7iR0F1w*7feuVT~UM_6FN3oPVyX$3;a7q0Qi<1)PCrlDV2H zqwt2A(*h1IF46w%`qrq)t+=?XI}a|+`g)HixZuyR2-0XZrIFR?`Kn4vfjRS&CN7Xr zg#+x|@Z^yZR}OMiz=^IHFow?t!lk3iH5m$tknamYvVO`-9%7;qIVy=}5=Hj9IT%HyB>R<^(7ed~+!<#?6ZMYUlyD!d zCTiWt9mGLxi3f}nrls9r-KqVIMp<__sHG(bZX%j$Bizj{*CfB57HN~d)U2&&y!l2) z&#W7Sc!L1fcKp7~1JS4T{KmH5ztaj{n2Dync{9{k#f!jqd5nJgLJWm7U!#GhD2XOi zshQ~3WNW{SPAOgCa5IF?*p0S(#WXF?GEJX}<(&j63mHES%ip-p(bu_I8^yHfCA%!{ zLOu5<>(cSb-Rze$LBw#P@D^I=4RkPIIyRm&(`OnL>XSi|QsS>a28BDUEH8;?a~rqv zm5MRQXlYX{ccdyBgLlN1FD|uqp@ZtWBG7N^PsbfrrZ!qiX}W&YA)mnK)K;wC%`$y`O@ zob@}RurO7)mWs=wjX5%kr@hEJYun@#OK-hFeBOp2!i&~Q*PYRa zwQtPLTR7gn&nsB>=I2N3`+#6WDlZo*c3dZRIkS%L`J(+EeMC;QX75bS&R=jFD+N11 z=z(`33j1+h{mbD)cHjNVqV|xDa^z6AbnYpH+2?rXJ&K`ET> zA}6UPITmNJlTVT+fH3ulX~!W@)U)%bY`k#P?M~SJR;gCBSff=!u)|n_8BHXc)fm3% zc-JY6hLl^lI|Y_a-32<6XwOz=*VT!z#1nFPyhFRuo^2EyUg5pni!pqvkl*flL2P`4 zOK5z57hfDlU;Cmu&-MD-UOcCSqpX$1KFS-$TlZU#ld3{@xQ1@C{JJ2}SxB6#h#&G= zF}W6Kn`OCJA=7N;>qM4$TmgA7sZxI!!$7yxsJ9R&oxDe^JX5~%^T*BQ)s>m4Wxl|G zWBX5^-dsbMdC68(N(pOzr`?(1?5@PG~{xbEupdex8rYEk}jZxJ6Mdn;Y(+$E{?;RKRZLMKH1Q@@slIn)M8@i^IT~LsQ%DOMI5xrmLc7(* zpl|pCST%MFq9E1yQShCs$+nIk1fEsk&)O^NQSz}6z@qzYlf4A(NE+Au@-?wBGX$-| z=5io*WP|}i59QR<&|qN)=gci)FPhJ0T<#5m2hQ@-(JoxoVZmHE6V|FsuE@GZ=|h~k$q0*P@*%eoJWlI!G3$y``fpubxN5O_RGrUNo^38 z=44Z`_#j*Ru{~PF1ngAqq`@qXY2Px8S#vL<31KHzLSadJv2lGf{=D$xz~TK-SyBlY z0w~DR8XC>&WgG>f~GM;zcvBfH_eEYBxk*D;GwD-%7 z?WNF;6JV@p2)Sq&5ubC_4m4V%LjE?Kh^uVuyC~m{H>>iN4d8L4*Ece1yrT7}elqX&i7)YlhxF0U=p9DU*+00fM@2yw)t06kO~ z7qxs1$K9&Y@zeVhC%45NZsHLge6^zwImXG?N*s>H4;z&gA9;Vr5Id!|no?boxxh9$ zszeE5xMQn)!^ir?Y$MQWHf{aYUeNC$k=0}wpM}uRWF+7jIS=h1=Mj0QRia8EHrMC- zg|Lj=+S=@F_wHq$S_ou-0=x#ac%0b}OIi>MH;{}kBn_b;r`3}CafxemxGbCIHqR9l zrmdoq-`@w$oSm<=X9MA@YiPg*3rkz2ORd1+t`@^6XDsrAG12L6S!!#ogtZaa!6sqs z-3$)7O<0A@DAKS&@$c6SrP7fR$~{?3*adIJbR}Fx1ENxsk~R&UjwT+*Vq*stz+}EJ z-(GWmnqQ&{l{W!}7v90o@+hKLg=}pWnUXt?g%%OTxG|9YAE-&aEfX{~d(n^fz3j8{ zty0cuEI_YZzMuPmFzD;>Ig@pZepwiW{D z#z|~EbHf@sw`)rmdGd_*mh$!4K0a=5i08EloxxExOVHK`n3K?C^k$oUb$q3U_Q$&yl1dy4(;G!f^|AhvLMI@&xL)ub|w zQo9j159PuLjggEi?Lwwd5j^FQmw>V0ioZgo+ds;tLyh+Ad4X+QUeZ;*=iQ7LY$!i3 zQ#WS4px_f1d*v1Q&C8F{uH~%EcPJGh{3wspe!k7AAR^$LLePaoAmdt%7G3KEme~eQ 
[GIT binary patch omitted: base85-encoded payload for the new inference-landscape.png image added by this commit]

diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index 0d2b61ef067f9..33db148755d8e 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -5,14 +5,16 @@ experimental[]
 
 IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in
-{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or
-Hugging Face. For built-in models and models uploaded through Eland, the {infer}
-APIs offer an alternative way to use and manage trained models. However, if you
-do not plan to use the {infer} APIs to use these models or if you want to use
-non-NLP models, use the <>.
+{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure,
+Google AI Studio or Hugging Face. For built-in models and models uploaded
+through Eland, the {infer} APIs offer an alternative way to use and manage
+trained models. However, if you do not plan to use the {infer} APIs to use these
+models or if you want to use non-NLP models, use the
+<>.
 
 The {infer} APIs enable you to create {infer} endpoints and use {ml} models of
-different providers - such as Cohere, OpenAI, or HuggingFace - as a service. Use
+different providers - such as Amazon Bedrock, Anthropic, Azure AI Studio,
+Cohere, Google AI, Mistral, OpenAI, or HuggingFace - as a service. Use
 the following APIs to manage {infer} models and perform {infer}:
 
 * <>
@@ -20,6 +22,18 @@ the following APIs to manage {infer} models and perform {infer}:
 * <>
 * <>
 
+[[inference-landscape]]
+.A representation of the Elastic inference landscape
+image::images/inference-landscape.png[A representation of the Elastic inference landscape,align="center"]
+
+An {infer} endpoint enables you to use the corresponding {ml} model without
+manual deployment and apply it to your data at ingestion time through
+<>.
+ +Choose a model from your provider or use ELSER – a retrieval model trained by +Elastic –, then create an {infer} endpoint by the <>. +Now use <> to perform +<> on your data. include::delete-inference.asciidoc[] include::get-inference.asciidoc[] From 2e22e73cdfcae4f278f79289d4ea3094aad12cc9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 14 Aug 2024 10:22:25 -0400 Subject: [PATCH 025/389] ESQL: Remove date_nanos from generated docs (#111884) This removes date_nanos from the docs generated for all of our functions because it's still under construction. I've done so as a sort of one-off hack. My plan is to replace this in a follow up change with a centralized registry of "under construction" data types. So we can make new data types under a feature flag more easilly in the future. We're going to be doing that a fair bit. --- .../functions/kibana/definition/mv_count.json | 12 ------------ .../functions/kibana/definition/mv_first.json | 12 ------------ .../functions/kibana/definition/mv_last.json | 12 ------------ .../functions/kibana/definition/mv_max.json | 12 ------------ .../functions/kibana/definition/mv_min.json | 12 ------------ .../esql/functions/types/mv_count.asciidoc | 1 - .../esql/functions/types/mv_first.asciidoc | 1 - .../esql/functions/types/mv_last.asciidoc | 1 - .../esql/functions/types/mv_max.asciidoc | 1 - .../esql/functions/types/mv_min.asciidoc | 1 - .../function/AbstractFunctionTestCase.java | 17 +++++++++++++++++ 11 files changed, 17 insertions(+), 65 deletions(-) diff --git a/docs/reference/esql/functions/kibana/definition/mv_count.json b/docs/reference/esql/functions/kibana/definition/mv_count.json index bcd4bab59031c..d414e5b957495 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_count.json +++ b/docs/reference/esql/functions/kibana/definition/mv_count.json @@ -40,18 +40,6 @@ "variadic" : false, "returnType" : "integer" }, - { - "params" : [ - { - "name" : "field", - "type" : "date_nanos", - "optional" : false, - "description" : "Multivalue expression." - } - ], - "variadic" : false, - "returnType" : "integer" - }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index 357177731fa2f..e3141e800e4ad 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -40,18 +40,6 @@ "variadic" : false, "returnType" : "cartesian_shape" }, - { - "params" : [ - { - "name" : "field", - "type" : "date_nanos", - "optional" : false, - "description" : "Multivalue expression." - } - ], - "variadic" : false, - "returnType" : "date_nanos" - }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index 4b7eee256afd6..e55d66dbf8b93 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -40,18 +40,6 @@ "variadic" : false, "returnType" : "cartesian_shape" }, - { - "params" : [ - { - "name" : "field", - "type" : "date_nanos", - "optional" : false, - "description" : "Multivalue expression." 
- } - ], - "variadic" : false, - "returnType" : "date_nanos" - }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 9bb7d378f5ce6..0783f6d6d5cbc 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -16,18 +16,6 @@ "variadic" : false, "returnType" : "boolean" }, - { - "params" : [ - { - "name" : "field", - "type" : "date_nanos", - "optional" : false, - "description" : "Multivalue expression." - } - ], - "variadic" : false, - "returnType" : "date_nanos" - }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index de9b11e88d1e0..cc23df386356e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -16,18 +16,6 @@ "variadic" : false, "returnType" : "boolean" }, - { - "params" : [ - { - "name" : "field", - "type" : "date_nanos", - "optional" : false, - "description" : "Multivalue expression." - } - ], - "variadic" : false, - "returnType" : "date_nanos" - }, { "params" : [ { diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index cec08438f38a1..8af6b76591acb 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -8,7 +8,6 @@ field | result boolean | integer cartesian_point | integer cartesian_shape | integer -date_nanos | integer datetime | integer double | integer geo_point | integer diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index 4d653e21285e4..e077c57971a4a 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -8,7 +8,6 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -date_nanos | date_nanos datetime | datetime double | double geo_point | geo_point diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index 4d653e21285e4..e077c57971a4a 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -8,7 +8,6 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -date_nanos | date_nanos datetime | datetime double | double geo_point | geo_point diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index caa67b5efe2d1..4e5f0a5e0ae89 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -6,7 +6,6 @@ |=== field | result boolean | boolean -date_nanos | date_nanos datetime | datetime double | double integer | integer diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index caa67b5efe2d1..4e5f0a5e0ae89 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -6,7 +6,6 @@ |=== field | result boolean | boolean -date_nanos | date_nanos datetime | datetime double | double integer | integer diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 2be32af3492c0..69c63f8388b0b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -859,6 +859,9 @@ private static void renderTypes(List argNames) throws IOException { List table = new ArrayList<>(); for (Map.Entry, DataType> sig : signatures().entrySet()) { // TODO flip to using sortedSignatures + if (shouldHideSignature(sig.getKey(), sig.getValue())) { + continue; + } if (sig.getKey().size() > argNames.size()) { // skip variadic [test] cases (but not those with optional parameters) continue; } @@ -1090,6 +1093,9 @@ private static void renderKibanaFunctionDefinition( if (sig.getKey().size() < minArgCount) { throw new IllegalArgumentException("signature " + sig.getKey() + " is missing non-optional arg for " + args); } + if (shouldHideSignature(sig.getKey(), sig.getValue())) { + continue; + } builder.startObject(); builder.startArray("params"); for (int i = 0; i < sig.getKey().size(); i++) { @@ -1275,4 +1281,15 @@ protected static void typesRequired(List suppliers) { private static boolean isAggregation() { return AbstractAggregationTestCase.class.isAssignableFrom(getTestClass()); } + + /** + * Should this particular signature be hidden from the docs even though we test it? + */ + private static boolean shouldHideSignature(List argTypes, DataType returnType) { + // DATE_NANOS are under construction and behind a feature flag. + if (returnType == DataType.DATE_NANOS) { + return true; + } + return argTypes.contains(DataType.DATE_NANOS); + } } From d3fdb36852281a4f7ed1a4dae11da9d67cf683eb Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 14 Aug 2024 15:26:54 +0100 Subject: [PATCH 026/389] [DOCS] Fix response value in esql-query-api.asciidoc (#111882) --- docs/reference/esql/esql-query-api.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index e8cfa03e3ee88..c8c735b73d2a4 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -102,7 +102,7 @@ Column `name` and `type` for each column returned in `values`. Each object is a Column `name` and `type` for each queried column. Each object is a single column. This is only returned if `drop_null_columns` is sent with the request. -`rows`:: +`values`:: (array of arrays) Values for the search results. From a67cbd1b8e1a6fda1cfe6042a63b00873e14fbaa Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 14 Aug 2024 11:04:13 -0400 Subject: [PATCH 027/389] Esql: followup on Source serialization changes (#111804) This addresses some of the comments on #111779 which I merged early because I wanted the fix in fast. 
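

The core of the cleanup is pulling the three serialized position ints into a
single record, so the write path, the read path, and the discard-only
readEmpty path share one wire layout and cannot drift apart. A minimal,
self-contained sketch of that pattern, assuming plain java.io streams in
place of StreamInput/StreamOutput and using hypothetical names throughout:

// Sketch only: DataInput/DataOutput stand in for StreamInput/StreamOutput,
// and the names below are hypothetical.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

class SourceFramingSketch {
    // One record owns the wire layout of the three position ints.
    record Positions(int line, int column, int length) {
        static Positions readFrom(DataInput in) throws IOException {
            return new Positions(in.readInt(), in.readInt(), in.readInt());
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeInt(line);
            out.writeInt(column);
            out.writeInt(length);
        }
    }

    // A reader that ignores the value must still consume its bytes; otherwise
    // every field serialized after it is decoded from the wrong stream offset.
    static void readAndDiscard(DataInput in) throws IOException {
        Positions.readFrom(in);
    }
}

With the layout owned by one type, a reader that throws the value away still
consumes exactly the bytes the writer emitted, which keeps everything
serialized after it aligned.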
--- .../xpack/esql/core/tree/Source.java | 39 ++++++++++++------- .../esql/querydsl/query/SingleValueQuery.java | 2 +- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Source.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Source.java index 01400e489d310..1b00ff4537603 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Source.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Source.java @@ -42,13 +42,11 @@ public static Source readFrom(S in) th if (in.readBoolean() == false) { return EMPTY; } - int line = in.readInt(); - int column = in.readInt(); - int charPositionInLine = column - 1; + SourcePositions positions = new SourcePositions(in); + int charPositionInLine = positions.column - 1; - int length = in.readInt(); - String text = sourceText(in.sourceText(), line, column, length); - return new Source(new Location(line, charPositionInLine), text); + String text = sourceText(in.sourceText(), positions.line, positions.column, positions.length); + return new Source(new Location(positions.line, charPositionInLine), text); } /** @@ -57,13 +55,11 @@ public static Source readFrom(S in) th * and there is no chance of getting a {@link PlanStreamInput}. */ public static Source readEmpty(StreamInput in) throws IOException { - if (in.readBoolean() == false) { - return EMPTY; + if (in.readBoolean()) { + // Read it and throw it away because we're always returning empty. + new SourcePositions(in); } - in.readInt(); - in.readInt(); - in.readInt(); - return Source.EMPTY; + return EMPTY; } @Override @@ -73,9 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { return; } out.writeBoolean(true); - out.writeInt(location.getLineNumber()); - out.writeInt(location.getColumnNumber()); - out.writeInt(text.length()); + new SourcePositions(location.getLineNumber(), location.getColumnNumber(), text.length()).writeTo(out); } // TODO: rename to location() @@ -145,4 +139,19 @@ private static int textOffset(String query, int line, int column) { return offset; } + /** + * Offsets into the source string that we use for serialization. + */ + private record SourcePositions(int line, int column, int length) implements Writeable { + SourcePositions(StreamInput in) throws IOException { + this(in.readInt(), in.readInt(), in.readInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(line); + out.writeInt(column); + out.writeInt(length); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java index e5998f0931f02..ad17c97e3d9ca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java @@ -111,7 +111,7 @@ public static class Builder extends AbstractQueryBuilder { this.source = Source.readFrom(psi); } else { /* - * For things like CanMatch we serialize without the Source. But we + * For things like CanMatchNodeRequest we serialize without the Source. But we * don't use it, so that's ok. 
*/ this.source = Source.readEmpty(in); From 595628f9ce891523bac8b1fbff197eee21f14241 Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Wed, 14 Aug 2024 17:30:31 +0200 Subject: [PATCH 028/389] [DOCS] The logs index.mode has been renamed logsdb (#111871) --- docs/reference/index-modules.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 24149afe802a2..7232de12c8c50 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -113,7 +113,7 @@ Index mode supports the following values: `time_series`::: Index mode optimized for storage of metrics documented in <>. -`logs`::: Index mode optimized for storage of logs. It applies default sort settings on the `hostname` and `timestamp` fields and uses <>. <> on different fields is still allowed. +`logsdb`::: Index mode optimized for storage of logs. It applies default sort settings on the `hostname` and `timestamp` fields and uses <>. <> on different fields is still allowed. preview:[] [[routing-partition-size]] `index.routing_partition_size`:: From e63225ae324a27446d6e8f2c7f1434068cbe324f Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 14 Aug 2024 10:39:45 -0500 Subject: [PATCH 029/389] Fixing incorrect bulk request took time (#111863) --- docs/changelog/111863.yaml | 6 ++++++ .../rest-api-spec/test/bulk/10_basic.yml | 20 +++++++++++++++++++ .../bulk/TransportAbstractBulkAction.java | 20 +++++++++---------- .../action/bulk/TransportBulkAction.java | 14 ++++++------- .../bulk/TransportSimulateBulkAction.java | 8 ++++---- .../concurrent/DeterministicTaskQueue.java | 2 +- 6 files changed, 48 insertions(+), 22 deletions(-) create mode 100644 docs/changelog/111863.yaml diff --git a/docs/changelog/111863.yaml b/docs/changelog/111863.yaml new file mode 100644 index 0000000000000..1724cd83f984b --- /dev/null +++ b/docs/changelog/111863.yaml @@ -0,0 +1,6 @@ +pr: 111863 +summary: Fixing incorrect bulk request took time +area: Ingest Node +type: bug +issues: + - 111854 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml index f4f6245603aab..403017484f121 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml @@ -229,3 +229,23 @@ - match: { items.0.index.error.type: illegal_argument_exception } - match: { items.0.index.error.reason: "no write index is defined for alias [test_index]. 
The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one being designated as a write index" } +--- +"Took is not orders of magnitude off": + - requires: + cluster_features: ["gte_v8.16.0"] + reason: "Bug reporting wrong took time introduced in 8.15.0, fixed in 8.16.0" + - do: + bulk: + body: + - index: + _index: took_test + - f: 1 + - index: + _index: took_test + - f: 2 + - index: + _index: took_test + - f: 3 + - match: { errors: false } + - gte: { took: 0 } + - lte: { took: 60000 } # Making sure we have a reasonable upper bound and that we're not for example returning nanoseconds diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index ff306cfb08745..c44ad505aea84 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -56,7 +56,7 @@ public abstract class TransportAbstractBulkAction extends HandledTransportAction protected final SystemIndices systemIndices; private final IngestService ingestService; private final IngestActionForwarder ingestForwarder; - protected final LongSupplier relativeTimeProvider; + protected final LongSupplier relativeTimeNanosProvider; protected final Executor writeExecutor; protected final Executor systemWriteExecutor; private final ActionType bulkAction; @@ -71,7 +71,7 @@ public TransportAbstractBulkAction( IngestService ingestService, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeNanosProvider ) { super(action.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; @@ -83,7 +83,7 @@ public TransportAbstractBulkAction( this.systemWriteExecutor = threadPool.executor(ThreadPool.Names.SYSTEM_WRITE); this.ingestForwarder = new IngestActionForwarder(transportService); clusterService.addStateApplier(this.ingestForwarder); - this.relativeTimeProvider = relativeTimeProvider; + this.relativeTimeNanosProvider = relativeTimeNanosProvider; this.bulkAction = action; } @@ -216,7 +216,7 @@ private void processBulkIndexIngestRequest( Metadata metadata, ActionListener listener ) { - final long ingestStartTimeInNanos = System.nanoTime(); + final long ingestStartTimeInNanos = relativeTimeNanos(); final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); getIngestService(original).executeBulkRequest( original.numberOfActions(), @@ -230,7 +230,7 @@ private void processBulkIndexIngestRequest( logger.debug("failed to execute pipeline for a bulk request", exception); listener.onFailure(exception); } else { - long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); + long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeNanos() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded( ingestTookInMillis, @@ -307,12 +307,12 @@ protected IngestService getIngestService(BulkRequest request) { return ingestService; } - protected long relativeTime() { - return relativeTimeProvider.getAsLong(); + protected long relativeTimeNanos() { + return relativeTimeNanosProvider.getAsLong(); } protected long buildTookInMillis(long 
startTimeNanos) { - return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); + return TimeUnit.NANOSECONDS.toMillis(relativeTimeNanos() - startTimeNanos); } private void applyPipelinesAndDoInternalExecute( @@ -321,9 +321,9 @@ private void applyPipelinesAndDoInternalExecute( Executor executor, ActionListener listener ) { - final long relativeStartTime = threadPool.relativeTimeInMillis(); + final long relativeStartTimeNanos = relativeTimeNanos(); if (applyPipelines(task, bulkRequest, executor, listener) == false) { - doInternalExecute(task, bulkRequest, executor, listener, relativeStartTime); + doInternalExecute(task, bulkRequest, executor, listener, relativeStartTimeNanos); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 7ed21ca832e37..a695e0f5e8ab6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -107,7 +107,7 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - System::nanoTime + threadPool::relativeTimeInNanos ); } @@ -197,7 +197,7 @@ protected void doInternalExecute( BulkRequest bulkRequest, Executor executor, ActionListener listener, - long relativeStartTime + long relativeStartTimeNanos ) { Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); @@ -212,7 +212,7 @@ protected void doInternalExecute( indicesToAutoCreate, dataStreamsToBeRolledOver, failureStoresToBeRolledOver, - relativeStartTime + relativeStartTimeNanos ); } @@ -309,19 +309,19 @@ protected void createMissingIndicesAndIndexData( Map indicesToAutoCreate, Set dataStreamsToBeRolledOver, Set failureStoresToBeRolledOver, - long startTime + long startTimeNanos ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); // Optimizing when there are no prerequisite actions if (indicesToAutoCreate.isEmpty() && dataStreamsToBeRolledOver.isEmpty() && failureStoresToBeRolledOver.isEmpty()) { - executeBulk(task, bulkRequest, startTime, listener, executor, responses, Map.of()); + executeBulk(task, bulkRequest, startTimeNanos, listener, executor, responses, Map.of()); return; } final Map indicesThatCannotBeCreated = new HashMap<>(); Runnable executeBulkRunnable = () -> executor.execute(new ActionRunnable<>(listener) { @Override protected void doRun() { - executeBulk(task, bulkRequest, startTime, listener, executor, responses, indicesThatCannotBeCreated); + executeBulk(task, bulkRequest, startTimeNanos, listener, executor, responses, indicesThatCannotBeCreated); } }); try (RefCountingRunnable refs = new RefCountingRunnable(executeBulkRunnable)) { @@ -533,7 +533,7 @@ void executeBulk( responses, indicesThatCannotBeCreated, indexNameExpressionResolver, - relativeTimeProvider, + relativeTimeNanosProvider, startTimeNanos, listener ).run(); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 73505ab9e3816..a4648a7accb5a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -68,7 +68,7 @@ public TransportSimulateBulkAction( ingestService, indexingPressure, systemIndices, - System::nanoTime + 
threadPool::relativeTimeInNanos ); this.indicesService = indicesService; } @@ -79,7 +79,7 @@ protected void doInternalExecute( BulkRequest bulkRequest, Executor executor, ActionListener listener, - long relativeStartTime + long relativeStartTimeNanos ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); for (int i = 0; i < bulkRequest.requests.size(); i++) { @@ -105,7 +105,7 @@ protected void doInternalExecute( ); } listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(relativeStartTime)) + new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(relativeStartTimeNanos)) ); } @@ -166,7 +166,7 @@ protected IngestService getIngestService(BulkRequest request) { } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { + protected boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) { // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store return false; } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java index 1fac5a9917807..ddf94bc838c4e 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java @@ -339,7 +339,7 @@ public String toString() { @Override public long relativeTimeInNanos() { - throw new AssertionError("DeterministicTaskQueue does not support nanosecond-precision timestamps"); + return TimeValue.timeValueMillis(currentTimeMillis).nanos(); } @Override From 6ada42bcfa9bbd8c5a57131549e0d8d0e1110710 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 01:55:18 +1000 Subject: [PATCH 030/389] Mute org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT test {p0=esql/26_aggs_bucket/friendlier BUCKET interval hourly: #110916} #111901 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4a53f94162cc0..e0eaaa3f9640d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -143,6 +143,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {comparison.RangeVersion SYNC} issue: https://github.com/elastic/elasticsearch/issues/111814 +- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT + method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval hourly: #110916}" + issue: https://github.com/elastic/elasticsearch/issues/111901 # Examples: # From 9940f90a5fd2131f6853af175dec11539d614fbb Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 01:55:27 +1000 Subject: [PATCH 031/389] Mute org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT test {p0=esql/26_aggs_bucket/friendlier BUCKET interval: monthly #110916} #111902 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index e0eaaa3f9640d..87f33b60c507b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -146,6 +146,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET 
interval hourly: #110916}" issue: https://github.com/elastic/elasticsearch/issues/111901 +- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT + method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval: monthly #110916}" + issue: https://github.com/elastic/elasticsearch/issues/111902 # Examples: # From 2ded98bd2df27916aad45cc3fa5f68d75e67aa05 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Wed, 14 Aug 2024 12:12:27 -0400 Subject: [PATCH 032/389] [ESQL] Basic casting for Date Nanos (#111850) Resolves https://github.com/elastic/elasticsearch/issues/109990 For the most part, this should be straightforward. The only "decision" being made here is to truncate when casting to millisecond dates, which is what we do in the `DateUtils` library class, and seems like a sensible choice. Nothing in here needs to be controlled via the feature flag, as we already just set the type to `UNSUPPORTED` when the flag is disabled. --- .../src/main/resources/meta.csv-spec | 20 ++++----- .../function/scalar/convert/ToDatetime.java | 12 +++++- .../function/scalar/convert/ToLong.java | 3 ++ .../function/scalar/convert/ToString.java | 9 ++++ .../xpack/esql/analysis/AnalyzerTests.java | 4 +- .../expression/function/TestCaseSupplier.java | 42 +++++++++++++++++++ .../scalar/convert/ToDatetimeTests.java | 10 ++++- .../function/scalar/convert/ToLongTests.java | 4 +- .../scalar/convert/ToStringTests.java | 8 ++++ 9 files changed, 97 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 7322c88386799..35c852d6ba2fe 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -92,21 +92,21 @@ double tau() "boolean to_boolean(field:boolean|keyword|text|double|long|unsigned_long|integer)" "cartesian_point to_cartesianpoint(field:cartesian_point|keyword|text)" "cartesian_shape to_cartesianshape(field:cartesian_point|cartesian_shape|keyword|text)" -"date to_datetime(field:date|keyword|text|double|long|unsigned_long|integer)" +"date to_datetime(field:date|date_nanos|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long)" "double to_degrees(number:double|integer|long|unsigned_long)" "double to_double(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long)" -"date to_dt(field:date|keyword|text|double|long|unsigned_long|integer)" +"date to_dt(field:date|date_nanos|keyword|text|double|long|unsigned_long|integer)" "geo_point to_geopoint(field:geo_point|keyword|text)" "geo_shape to_geoshape(field:geo_point|geo_shape|keyword|text)" "integer to_int(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer)" "integer to_integer(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer)" "ip to_ip(field:ip|keyword|text)" -"long to_long(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long)" +"long to_long(field:boolean|date|date_nanos|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long)" "keyword|text to_lower(str:keyword|text)" "double to_radians(number:double|integer|long|unsigned_long)" -"keyword 
to_str(field:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" -"keyword to_string(field:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_str(field:boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_string(field:boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "unsigned_long to_ul(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_ulong(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_unsigned_long(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" @@ -215,21 +215,21 @@ to_bool |field |"boolean|keyword|text|double to_boolean |field |"boolean|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_cartesianpo|field |"cartesian_point|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_cartesiansh|field |"cartesian_point|cartesian_shape|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. -to_datetime |field |"date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_datetime |field |"date|date_nanos|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_dbl |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. to_degrees |number |"double|integer|long|unsigned_long" |Input value. The input can be a single- or multi-valued column or an expression. to_double |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. -to_dt |field |"date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. +to_dt |field |"date|date_nanos|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_geopoint |field |"geo_point|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_geoshape |field |"geo_point|geo_shape|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_int |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer" |Input value. The input can be a single- or multi-valued column or an expression. to_integer |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer" |Input value. The input can be a single- or multi-valued column or an expression. to_ip |field |"ip|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. -to_long |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. 
+to_long |field |"boolean|date|date_nanos|keyword|text|double|long|unsigned_long|integer|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. to_lower |str |"keyword|text" |String expression. If `null`, the function returns `null`. to_radians |number |"double|integer|long|unsigned_long" |Input value. The input can be a single- or multi-valued column or an expression. -to_str |field |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. -to_string |field |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. +to_str |field |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. +to_string |field |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. to_ul |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_ulong |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_unsigned_lo|field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. 
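

The "truncate, not round" choice called out in the commit message is what the
ToDatetime change below implements. A hedged sketch of that semantics, with a
hypothetical helper standing in for the real conversion (which goes through
DateUtils.toMilliSeconds):

// Hypothetical helper illustrating the truncation semantics; the production
// code delegates to org.elasticsearch.common.time.DateUtils#toMilliSeconds.
class NanosToMillisSketch {
    static long toMillisTruncating(long epochNanos) {
        // Integer division drops the sub-millisecond remainder for
        // non-negative epoch values: 1_999_999 ns maps to 1 ms, never 2 ms.
        return epochNanos / 1_000_000L;
    }

    public static void main(String[] args) {
        System.out.println(toMillisTruncating(1_999_999L)); // prints 1
    }
}

Truncating matches what the DateUtils library class already does, so both
conversion paths agree on which millisecond a given nanosecond timestamp
belongs to.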
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java index 917abc9d77168..2c86dfbac12ce 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.compute.ann.ConvertEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -24,6 +25,7 @@ import java.util.Map; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -41,6 +43,7 @@ public class ToDatetime extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(DATETIME, (field, source) -> field), + Map.entry(DATE_NANOS, ToDatetimeFromDateNanosEvaluator.Factory::new), Map.entry(LONG, (field, source) -> field), Map.entry(KEYWORD, ToDatetimeFromStringEvaluator.Factory::new), Map.entry(TEXT, ToDatetimeFromStringEvaluator.Factory::new), @@ -55,6 +58,8 @@ public class ToDatetime extends AbstractConvertFunction { Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>.""", + note = "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is" + + "truncated, not rounded.", examples = { @Example(file = "date", tag = "to_datetime-str", explanation = """ Note that in this example, the last value in the source multi-valued field has not been converted. @@ -81,7 +86,7 @@ public ToDatetime( Source source, @Param( name = "field", - type = { "date", "keyword", "text", "double", "long", "unsigned_long", "integer" }, + type = { "date", "date_nanos", "keyword", "text", "double", "long", "unsigned_long", "integer" }, description = "Input value. The input can be a single- or multi-valued column or an expression." 
) Expression field ) { @@ -121,4 +126,9 @@ protected NodeInfo info() { static long fromKeyword(BytesRef in) { return dateTimeToLong(in.utf8ToString()); } + + @ConvertEvaluator(extraName = "FromDateNanos", warnExceptions = { IllegalArgumentException.class }) + static long fromDatenanos(long in) { + return DateUtils.toMilliSeconds(in); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index 4811051c3f488..e5f138df159cd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -26,6 +26,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -42,6 +43,7 @@ public class ToLong extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(LONG, (fieldEval, source) -> fieldEval), Map.entry(DATETIME, (fieldEval, source) -> fieldEval), + Map.entry(DATE_NANOS, (fieldEval, source) -> fieldEval), Map.entry(BOOLEAN, ToLongFromBooleanEvaluator.Factory::new), Map.entry(KEYWORD, ToLongFromStringEvaluator.Factory::new), Map.entry(TEXT, ToLongFromStringEvaluator.Factory::new), @@ -76,6 +78,7 @@ public ToLong( type = { "boolean", "date", + "date_nanos", "keyword", "text", "double", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index cb9eae6b5f435..f9bc15c4d6903 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -28,6 +28,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; @@ -40,6 +41,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.ipToString; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.nanoTimeToString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.numericBooleanToString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.spatialToString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToString; @@ -52,6 
+54,7 @@ public class ToString extends AbstractConvertFunction implements EvaluatorMapper Map.entry(KEYWORD, (fieldEval, source) -> fieldEval), Map.entry(BOOLEAN, ToStringFromBooleanEvaluator.Factory::new), Map.entry(DATETIME, ToStringFromDatetimeEvaluator.Factory::new), + Map.entry(DATE_NANOS, ToStringFromDateNanosEvaluator.Factory::new), Map.entry(IP, ToStringFromIPEvaluator.Factory::new), Map.entry(DOUBLE, ToStringFromDoubleEvaluator.Factory::new), Map.entry(LONG, ToStringFromLongEvaluator.Factory::new), @@ -81,6 +84,7 @@ public ToString( "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", @@ -141,6 +145,11 @@ static BytesRef fromDatetime(long datetime) { return new BytesRef(dateTimeToString(datetime)); } + @ConvertEvaluator(extraName = "FromDateNanos") + static BytesRef fromDateNanos(long datetime) { + return new BytesRef(nanoTimeToString(datetime)); + } + @ConvertEvaluator(extraName = "FromDouble") static BytesRef fromDouble(double dbl) { return numericBooleanToString(dbl); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 583251817d681..f663002a51d68 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1431,8 +1431,8 @@ public void testRegexOnInt() { public void testUnsupportedTypesWithToString() { // DATE_PERIOD and TIME_DURATION types have been added, but not really patched through the engine; i.e. supported. - final String supportedTypes = - "boolean or cartesian_point or cartesian_shape or datetime or geo_point or geo_shape or ip or numeric or string or version"; + final String supportedTypes = "boolean or cartesian_point or cartesian_shape or date_nanos or datetime " + + "or geo_point or geo_shape or ip or numeric or string or version"; verifyUnsupported( "row period = 1 year | eval to_string(period)", "line 1:28: argument of [to_string(period)] must be [" + supportedTypes + "], found value [period] type [date_period]" diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index b145cc1393943..5ef71e7ae30fb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.logging.LogManager; @@ -625,6 +626,26 @@ public static void forUnaryDatetime( ); } + /** + * Generate positive test cases for a unary function operating on an {@link DataType#DATE_NANOS}. 
+ */ + public static void forUnaryDateNanos( + List suppliers, + String expectedEvaluatorToString, + DataType expectedType, + Function expectedValue, + List warnings + ) { + unaryNumeric( + suppliers, + expectedEvaluatorToString, + dateNanosCases(), + expectedType, + n -> expectedValue.apply(DateUtils.toInstant((long) n)), + warnings + ); + } + /** * Generate positive test cases for a unary function operating on an {@link DataType#GEO_POINT}. */ @@ -1030,6 +1051,27 @@ public static List dateCases() { ); } + /** + * Generate cases for {@link DataType#DATE_NANOS}. + * + */ + public static List dateNanosCases() { + return List.of( + new TypedDataSupplier("<1970-01-01T00:00:00.000000000Z>", () -> 0L, DataType.DATE_NANOS), + new TypedDataSupplier("", () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11), DataType.DATE_NANOS), + new TypedDataSupplier( + "", + () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE), + DataType.DATE_NANOS + ), + new TypedDataSupplier( + "", + () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE), + DataType.DATE_NANOS + ) + ); + } + public static List datePeriodCases() { return List.of( new TypedDataSupplier("", () -> Period.ZERO, DataType.DATE_PERIOD, true), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 7025c7df4ba39..7799c3c756f23 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -37,6 +38,13 @@ public static Iterable parameters() { final List suppliers = new ArrayList<>(); TestCaseSupplier.forUnaryDatetime(suppliers, read, DataType.DATETIME, Instant::toEpochMilli, emptyList()); + TestCaseSupplier.forUnaryDateNanos( + suppliers, + "ToDatetimeFromDateNanosEvaluator[field=" + read + "]", + DataType.DATETIME, + i -> DateUtils.toMilliSeconds(DateUtils.toLong(i)), + emptyList() + ); TestCaseSupplier.forUnaryInt( suppliers, @@ -162,7 +170,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "datetime or numeric or string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "date_nanos or datetime or numeric or string"); } private static String randomDateString(long from, long to) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 6e931c802030f..4c2cf14af41e9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -11,6 +11,7 @@ import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -43,6 +44,7 @@ public static Iterable parameters() { // datetimes TestCaseSupplier.forUnaryDatetime(suppliers, read, DataType.LONG, Instant::toEpochMilli, List.of()); + TestCaseSupplier.forUnaryDateNanos(suppliers, read, DataType.LONG, DateUtils::toLong, List.of()); // random strings that don't look like a long TestCaseSupplier.forUnaryStrings( suppliers, @@ -230,7 +232,7 @@ public static Iterable parameters() { return parameterSuppliersFromTypedDataWithDefaultChecks( true, suppliers, - (v, p) -> "boolean or counter_integer or counter_long or datetime or numeric or string" + (v, p) -> "boolean or counter_integer or counter_long or date_nanos or datetime or numeric or string" ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 44c7c108bdec4..0b101efa073d9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -87,6 +88,13 @@ public static Iterable parameters() { i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(i.toEpochMilli())), List.of() ); + TestCaseSupplier.forUnaryDateNanos( + suppliers, + "ToStringFromDateNanosEvaluator[field=" + read + "]", + DataType.KEYWORD, + i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_NANOS_FORMATTER.formatNanos(DateUtils.toLong(i))), + List.of() + ); TestCaseSupplier.forUnaryGeoPoint( suppliers, "ToStringFromGeoPointEvaluator[field=" + read + "]", From 3f280d0d4bae9c48c7c003867cd68fe3cdf16897 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Wed, 14 Aug 2024 18:12:43 +0200 Subject: [PATCH 033/389] Guard test runs on SL (#111897) Condition running some newly added tests on a new capability. --- .../xpack/esql/action/EsqlCapabilities.java | 7 ++++++- .../test/esql/26_aggs_bucket.yml | 19 ++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index b1031f06a194d..3abbb655dadd3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -224,7 +224,12 @@ public enum Cap { /** * Support CIDRMatch in CombineDisjunctions rule. */ - COMBINE_DISJUNCTIVE_CIDRMATCHES; + COMBINE_DISJUNCTIVE_CIDRMATCHES, + + /** + * Consider the upper bound when computing the interval in BUCKET auto mode. 
+ */
+ BUCKET_INCLUSIVE_UPPER_BOUND;
 private final boolean snapshotOnly;
 private final FeatureFlag featureFlag;
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
index d18b6261fc1d7..7d0989a6e1886 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml
@@ -1,9 +1,13 @@
 ---
 "friendlier BUCKET interval hourly: #110916":
 - requires:
- cluster_features: ["gte_v8.14.0"]
- reason: "BUCKET extended in 8.14.0"
- test_runner_features: allowed_warnings_regex
+ test_runner_features: [allowed_warnings_regex, capabilities]
+ capabilities:
+ - method: POST
+ path: /_query
+ parameters: []
+ capabilities: [bucket_inclusive_upper_bound]
+ reason: "BUCKET auto mode now generates different bucket sizes"
 - do:
 indices.create:
 index: test_bucket
@@ -86,8 +90,13 @@
 "friendlier BUCKET interval: monthly #110916":
 - requires:
 cluster_features: ["gte_v8.14.0"]
- reason: "BUCKET extended in 8.14.0"
- test_runner_features: allowed_warnings_regex
+ test_runner_features: [allowed_warnings_regex, capabilities]
+ capabilities:
+ - method: POST
+ path: /_query
+ parameters: []
+ capabilities: [bucket_inclusive_upper_bound]
+ reason: "BUCKET auto mode now generates different bucket sizes"
 - do:
 indices.create:
 index: test_bucket
From fa2e2812d4dd34a1ccf18009b589e72d5638a908 Mon Sep 17 00:00:00 2001
From: Samiul Monir <150824886+Samiul-TheSoccerFan@users.noreply.github.com>
Date: Wed, 14 Aug 2024 12:40:54 -0400
Subject: [PATCH 034/389] Adding Field caps support for Semantic Text (#111809)

* Adding override function of fieldHasValue to exclude field when field is empty
* Updating unit tests for Semantic Search Text mapper
* Adding yaml tests for validating field caps for Semantic Text field
* Update docs/changelog/111809.yaml
* Adding and updating yaml tests and changelog file
* Refactor yaml test
---
 docs/changelog/111809.yaml | 5 +
 x-pack/plugin/inference/build.gradle | 2 +-
 .../mapper/SemanticTextFieldMapper.java | 8 +-
 .../mapper/SemanticTextFieldMapperTests.java | 29 ++++
 .../10_semantic_text_field_mapping.yml | 147 ++++++++++++++++++
 5 files changed, 189 insertions(+), 2 deletions(-)
 create mode 100644 docs/changelog/111809.yaml

diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml
new file mode 100644
index 0000000000000..5a2f220e3a697
--- /dev/null
+++ b/docs/changelog/111809.yaml
@@ -0,0 +1,5 @@
+pr: 111809
+summary: Add Field caps support for Semantic Text
+area: Mapping
+type: enhancement
+issues: []
diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle
index beeec94f21ebf..211b99343340d 100644
--- a/x-pack/plugin/inference/build.gradle
+++ b/x-pack/plugin/inference/build.gradle
@@ -12,7 +12,7 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test'
 restResources {
 restApi {
- include '_common', 'bulk', 'indices', 'inference', 'index', 'get', 'update', 'reindex', 'search'
+ include '_common', 'bulk', 'indices', 'inference', 'index', 'get', 'update', 'reindex', 'search', 'field_caps'
 }
 }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java
index b9b95afbf6dc6..a8c3de84572a7 100644
---
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.mapper; +import org.apache.lucene.index.FieldInfos; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; @@ -320,7 +321,7 @@ public SemanticTextFieldType( IndexVersion indexVersionCreated, Map meta ) { - super(name, false, false, false, TextSearchInfo.NONE, meta); + super(name, true, false, false, TextSearchInfo.NONE, meta); this.inferenceId = inferenceId; this.modelSettings = modelSettings; this.inferenceField = inferenceField; @@ -383,6 +384,11 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext throw new IllegalArgumentException("[semantic_text] fields do not support sorting, scripting or aggregating"); } + @Override + public boolean fieldHasValue(FieldInfos fieldInfos) { + return fieldInfos.fieldInfo(getEmbeddingsFieldName(name())) != null; + } + public QueryBuilder semanticQuery(InferenceResults inferenceResults, float boost, String queryName) { String nestedFieldPath = getChunksFieldName(name()); String inferenceResultsFieldName = getEmbeddingsFieldName(name()); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index 1cae8d981313f..bb0691c691176 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.inference.mapper; import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; @@ -63,6 +65,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; @@ -130,6 +133,25 @@ protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + @Override + public MappedFieldType getMappedFieldType() { + return new SemanticTextFieldMapper.SemanticTextFieldType( + "field", + "fake-inference-id", + null, + null, + IndexVersion.current(), + Map.of() + ); + } + + @Override + protected void assertSearchable(MappedFieldType fieldType) { + assertThat(fieldType, instanceOf(SemanticTextFieldMapper.SemanticTextFieldType.class)); + assertTrue(fieldType.isIndexed()); + assertTrue(fieldType.isSearchable()); + } + public void testDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); @@ -141,6 +163,13 @@ public void testDefaults() throws Exception { assertTrue(fields.isEmpty()); } + @Override + public void testFieldHasValue() { + MappedFieldType fieldType = getMappedFieldType(); + FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { 
getFieldInfoWithName(getEmbeddingsFieldName("field")) }); + assertTrue(fieldType.fieldHasValue(fieldInfos)); + } + public void testInferenceIdNotPresent() { Exception e = expectThrows( MapperParsingException.class, diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml index d7f7e21e6f428..3f907ae1de6cd 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml @@ -63,6 +63,82 @@ setup: - match: { "test-index.mappings.properties.sparse_field.model_settings.task_type": sparse_embedding } - length: { "test-index.mappings.properties.sparse_field": 3 } +--- +"Field caps with sparse embedding": + + - requires: + cluster_features: "gte_v8.16.0" + reason: field_caps support for semantic_text added in 8.16.0 + + - do: + field_caps: + include_empty_fields: true + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - exists: fields.sparse_field + - exists: fields.dense_field + + - do: + field_caps: + include_empty_fields: false + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - not_exists: fields.sparse_field + - not_exists: fields.dense_field + + - do: + index: + index: test-index + id: doc_1 + body: + sparse_field: + text: "these are not the droids you're looking for. He's free to go around" + inference: + inference_id: sparse-inference-id + model_settings: + task_type: sparse_embedding + chunks: + - text: "these are not the droids you're looking for" + embeddings: + feature_0: 1.0 + feature_1: 2.0 + feature_2: 3.0 + feature_3: 4.0 + - text: "He's free to go around" + embeddings: + feature_4: 0.1 + feature_5: 0.2 + feature_6: 0.3 + feature_7: 0.4 + refresh: true + + - do: + field_caps: + include_empty_fields: true + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - exists: fields.sparse_field + - exists: fields.dense_field + - match: { fields.sparse_field.semantic_text.searchable: true } + - match: { fields.dense_field.semantic_text.searchable: true } + + - do: + field_caps: + include_empty_fields: false + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - exists: fields.sparse_field + - not_exists: fields.dense_field + - match: { fields.sparse_field.semantic_text.searchable: true } + --- "Indexes dense vector document": @@ -105,6 +181,77 @@ setup: - match: { "test-index.mappings.properties.dense_field.model_settings.task_type": text_embedding } - length: { "test-index.mappings.properties.dense_field": 3 } +--- +"Field caps with text embedding": + + - requires: + cluster_features: "gte_v8.16.0" + reason: field_caps support for semantic_text added in 8.16.0 + + - do: + field_caps: + include_empty_fields: true + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - exists: fields.sparse_field + - exists: fields.dense_field + + - do: + field_caps: + include_empty_fields: false + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - not_exists: fields.sparse_field + - not_exists: fields.dense_field + + - do: + index: + index: test-index + id: doc_2 + body: + dense_field: + text: "these are not the droids you're looking for. 
He's free to go around" + inference: + inference_id: dense-inference-id + model_settings: + task_type: text_embedding + dimensions: 4 + similarity: cosine + element_type: float + chunks: + - text: "these are not the droids you're looking for" + embeddings: [ 0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416 ] + - text: "He's free to go around" + embeddings: [ 0.00641461368650198, -0.0016253676731139421, -0.05126338079571724, 0.053438711911439896 ] + refresh: true + + - do: + field_caps: + include_empty_fields: true + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - exists: fields.sparse_field + - exists: fields.dense_field + - match: { fields.sparse_field.semantic_text.searchable: true } + - match: { fields.dense_field.semantic_text.searchable: true } + + - do: + field_caps: + include_empty_fields: false + index: test-index + fields: "*" + + - match: { indices: [ "test-index" ] } + - not_exists: fields.sparse_field + - exists: fields.dense_field + - match: { fields.dense_field.semantic_text.searchable: true } + --- "Can't be used as a multifield": From fac9b6a21e2d971185bf67d06b3ec4ca372953e2 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 14 Aug 2024 12:59:01 -0500 Subject: [PATCH 035/389] Updating fix version for bulk api took time fix now that it has been backported (#111863) (#111899) (#111906) --- .../resources/rest-api-spec/test/bulk/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml index 403017484f121..a2dfe3784d5ae 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml @@ -232,8 +232,8 @@ --- "Took is not orders of magnitude off": - requires: - cluster_features: ["gte_v8.16.0"] - reason: "Bug reporting wrong took time introduced in 8.15.0, fixed in 8.16.0" + cluster_features: ["gte_v8.15.1"] + reason: "Bug reporting wrong took time introduced in 8.15.0, fixed in 8.15.1" - do: bulk: body: From 9b008835c7417170675a4fb719690b1c49fbd4c9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 14 Aug 2024 14:37:48 -0400 Subject: [PATCH 036/389] ESQL: Make a field for types under construction (#111904) This makes a constant that holds all our "under construction" data types and links all of the "under construction" hacks to the constant. 
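The shape of the change, stripped of the ES-specific plumbing, is a single type-to-flag map that every gate consults. A minimal, self-contained sketch of that pattern follows; the `SketchType` and `SketchFlag` names are illustrative stand-ins, not the real `DataType` and `FeatureFlag` classes:

[source,java]
--------------------------------------------------
import java.util.Map;

// Stand-ins for the real DataType enum and FeatureFlag class.
enum SketchType { LONG, DATE_NANOS }

record SketchFlag(boolean enabled) {}

class UnderConstructionSketch {
    // One shared constant replaces per-type checks scattered through the code.
    static final Map<SketchType, SketchFlag> UNDER_CONSTRUCTION =
        Map.of(SketchType.DATE_NANOS, new SketchFlag(false));

    // Every "is this type usable yet?" question goes through the same lookup.
    static boolean isUsable(SketchType type) {
        SketchFlag flag = UNDER_CONSTRUCTION.get(type);
        return flag == null || flag.enabled();
    }

    public static void main(String[] args) {
        System.out.println(isUsable(SketchType.LONG));       // true: never gated
        System.out.println(isUsable(SketchType.DATE_NANOS)); // false: flag disabled
    }
}
--------------------------------------------------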
--- .../xpack/esql/core/type/DataType.java | 21 +++++++++++++--- .../function/EsqlFunctionRegistry.java | 24 ++++++++++++------- .../function/AbstractFunctionTestCase.java | 18 +++++++++----- 3 files changed, 45 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 771c78213a061..065ada06bfa1e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; @@ -193,6 +194,16 @@ public enum DataType { */ PARTIAL_AGG(builder().esType("partial_agg").unknownSize()); + /** + * Types that are actively being built. These types are not returned + * from Elasticsearch if their associated {@link FeatureFlag} is disabled. + * They aren't included in generated documentation. And the tests don't + * check that sending them to a function produces a sane error message. + */ + public static final Map UNDER_CONSTRUCTION = Map.ofEntries( + Map.entry(DATE_NANOS, EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG) + ); + private final String typeName; private final String name; @@ -290,10 +301,14 @@ public static DataType fromTypeName(String name) { public static DataType fromEs(String name) { DataType type = ES_TO_TYPE.get(name); - if (type == DATE_NANOS && EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG.isEnabled() == false) { - type = UNSUPPORTED; + if (type == null) { + return UNSUPPORTED; } - return type != null ? 
type : UNSUPPORTED; + FeatureFlag underConstruction = UNDER_CONSTRUCTION.get(type); + if (underConstruction != null && underConstruction.isEnabled() == false) { + return UNSUPPORTED; + } + return type; } public static DataType fromJava(Object value) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 12ed6d313541a..6e23f4445b564 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -10,11 +10,11 @@ import org.elasticsearch.Build; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.xpack.esql.core.ParsingException; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.Function; -import org.elasticsearch.xpack.esql.core.plugin.EsqlCorePlugin; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Check; @@ -479,10 +479,7 @@ public static FunctionDescription description(FunctionDefinition def) { Constructor constructor = constructors[0]; FunctionInfo functionInfo = functionInfo(def); String functionDescription = functionInfo == null ? "" : functionInfo.description().replace('\n', ' '); - String[] returnType = functionInfo == null ? new String[] { "?" } : functionInfo.returnType(); - if (EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG.isEnabled() == false) { - returnType = Arrays.stream(returnType).filter(t -> "date_nanos".equals(t) == false).toArray(String[]::new); - } + String[] returnType = functionInfo == null ? new String[] { "?" } : removeUnderConstruction(functionInfo.returnType()); var params = constructor.getParameters(); // no multiple c'tors supported List args = new ArrayList<>(params.length); @@ -493,10 +490,7 @@ public static FunctionDescription description(FunctionDefinition def) { Param paramInfo = params[i].getAnnotation(Param.class); String name = paramInfo == null ? params[i].getName() : paramInfo.name(); variadic |= List.class.isAssignableFrom(params[i].getType()); - String[] type = paramInfo == null ? new String[] { "?" } : paramInfo.type(); - if (EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG.isEnabled() == false) { - type = Arrays.stream(type).filter(t -> "date_nanos".equals(t) == false).toArray(String[]::new); - } + String[] type = paramInfo == null ? new String[] { "?" } : removeUnderConstruction(paramInfo.type()); String desc = paramInfo == null ? "" : paramInfo.description().replace('\n', ' '); boolean optional = paramInfo == null ? false : paramInfo.optional(); DataType targetDataType = getTargetType(type); @@ -506,6 +500,18 @@ public static FunctionDescription description(FunctionDefinition def) { return new FunctionDescription(def.name(), args, returnType, functionDescription, variadic, isAggregation); } + /** + * Remove types that are being actively built. 
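+ * Keeps gated types out of the generated parameter and return-type documentation until their feature flags are enabled.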
+ */
+ private static String[] removeUnderConstruction(String[] types) {
+ for (Map.Entry<DataType, FeatureFlag> underConstruction : DataType.UNDER_CONSTRUCTION.entrySet()) {
+ if (underConstruction.getValue().isEnabled() == false) {
+ types = Arrays.stream(types).filter(t -> underConstruction.getKey().typeName().equals(t) == false).toArray(String[]::new);
+ }
+ }
+ return types;
+ }
+
 public static FunctionInfo functionInfo(FunctionDefinition def) {
 var constructors = def.clazz().getConstructors();
 if (constructors.length == 0) {
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index 69c63f8388b0b..cece2badb2955 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -443,8 +443,12 @@ public static Stream<DataType> validFunctionParameters() {
 // We don't test that functions don't take date_period or time_duration. We should.
 return false;
 }
- if (t == DataType.DATE_NANOS) {
- // Date nanos is still under construction
+ if (DataType.UNDER_CONSTRUCTION.containsKey(t)) {
+ /*
+ * Types under construction aren't checked because we're actively
+ * adding support for them to functions. That's *why* they are
+ * under construction.
+ */
 return false;
 }
 if (t.isCounter()) {
@@ -1286,10 +1290,12 @@ private static boolean isAggregation() {
 * Should this particular signature be hidden from the docs even though we test it?
 */
 private static boolean shouldHideSignature(List<DataType> argTypes, DataType returnType) {
- // DATE_NANOS are under construction and behind a feature flag.
- if (returnType == DataType.DATE_NANOS) {
- return true;
+ for (DataType dt : DataType.UNDER_CONSTRUCTION.keySet()) {
+ if (returnType == dt) {
+ return true;
+ }
+ return argTypes.contains(dt);
 }
- return argTypes.contains(DataType.DATE_NANOS);
+ return false;
 }
 }
From 633326e8e0f11a61f27da92d104cfc36559e629d Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 14 Aug 2024 18:15:36 -0700
Subject: [PATCH 037/389] Add nanos support to ZonedDateTime serialization (#111689)

ZonedDateTime supports nanos through Instant, yet StreamInput/StreamOutput
always assumes only millisecond precision in the Instant it creates. This
commit adjusts serialization of ZonedDateTime to separate seconds from
nanoseconds to match the underlying representation of Instant, so that nanos
are supported.
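The essence of the change is writing the instant as an epoch-second count plus a separate nanosecond adjustment rather than a single millisecond value. A rough round-trip sketch follows, using plain `DataOutputStream`/`DataInputStream` as stand-ins for the real `StreamOutput`/`StreamInput` wire code, with the transport-version gate omitted:

[source,java]
--------------------------------------------------
import java.io.*;
import java.time.*;

class ZdtRoundTripSketch {
    static void write(DataOutputStream out, ZonedDateTime zdt) throws IOException {
        out.writeUTF(zdt.getZone().getId());
        Instant instant = zdt.toInstant();
        out.writeLong(instant.getEpochSecond()); // whole seconds
        out.writeInt(instant.getNano());         // nanos, no longer truncated to millis
    }

    static ZonedDateTime read(DataInputStream in) throws IOException {
        ZoneId zone = ZoneId.of(in.readUTF());
        Instant instant = Instant.ofEpochSecond(in.readLong(), in.readInt());
        return ZonedDateTime.ofInstant(instant, zone);
    }

    public static void main(String[] args) throws IOException {
        ZonedDateTime original = ZonedDateTime.now(ZoneId.of("UTC")).withNano(123_456_789);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), original);
        ZonedDateTime copy = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(original.equals(copy)); // true: nanosecond precision survives
    }
}
--------------------------------------------------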
closes #68292 --- docs/changelog/111689.yaml | 6 +++ .../org/elasticsearch/TransportVersions.java | 1 + .../common/io/stream/StreamInput.java | 8 +++- .../common/io/stream/StreamOutput.java | 8 +++- .../common/io/stream/AbstractStreamTests.java | 42 ++++++++++++++++++- 5 files changed, 61 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/111689.yaml diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml new file mode 100644 index 0000000000000..ccb3d4d4f87c5 --- /dev/null +++ b/docs/changelog/111689.yaml @@ -0,0 +1,6 @@ +pr: 111689 +summary: Add nanos support to `ZonedDateTime` serialization +area: Infra/Core +type: enhancement +issues: + - 68292 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index e0fab5a3e1231..1995c430472ba 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -189,6 +189,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0); public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); + public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 60322ea89cbe8..8de49ded03a4e 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -902,7 +902,13 @@ public final Instant readOptionalInstant() throws IOException { private ZonedDateTime readZonedDateTime() throws IOException { final String timeZoneId = readString(); - return ZonedDateTime.ofInstant(Instant.ofEpochMilli(readLong()), ZoneId.of(timeZoneId)); + final Instant instant; + if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { + instant = Instant.ofEpochSecond(readVLong(), readInt()); + } else { + instant = Instant.ofEpochMilli(readLong()); + } + return ZonedDateTime.ofInstant(instant, ZoneId.of(timeZoneId)); } private OffsetTime readOffsetTime() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index f17e3ee8018a2..9d5b9a107ee6a 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -766,7 +766,13 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep o.writeByte((byte) 23); final ZonedDateTime zonedDateTime = (ZonedDateTime) v; o.writeString(zonedDateTime.getZone().getId()); - o.writeLong(zonedDateTime.toInstant().toEpochMilli()); + Instant instant = zonedDateTime.toInstant(); + if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { + o.writeVLong(instant.getEpochSecond()); + o.writeInt(instant.getNano()); + } else { + o.writeLong(instant.toEpochMilli()); + } }), entry(Set.class, (o, v) -> { if (v instanceof LinkedHashSet) { diff --git 
a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java index 1a6f52fabbd1b..b4aa58ae13f7b 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java @@ -9,6 +9,8 @@ package org.elasticsearch.common.io.stream; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -25,6 +27,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; import java.io.EOFException; import java.io.IOException; @@ -48,6 +51,8 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import static java.time.Instant.ofEpochSecond; +import static java.time.ZonedDateTime.ofInstant; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; @@ -720,21 +725,54 @@ public void testReadAfterReachingEndOfStream() throws IOException { } } + public void testZonedDateTimeSerialization() throws IOException { + checkZonedDateTimeSerialization(TransportVersions.ZDT_NANOS_SUPPORT); + } + + public void testZonedDateTimeMillisBwcSerialization() throws IOException { + checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(TransportVersions.ZDT_NANOS_SUPPORT)); + } + + public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOException { + assertGenericRoundtrip(ofInstant(Instant.EPOCH, randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(1), randomZone()), tv); + // just want to test a large number that will use 5+ bytes + long maxEpochSecond = Integer.MAX_VALUE; + assertGenericRoundtrip(ofInstant(ofEpochSecond(maxEpochSecond), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond)), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 1_000_000), randomZone()), tv); + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_000_000), randomZone()), tv); + if (tv.onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) { + assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_999_999), randomZone()), tv); + assertGenericRoundtrip( + ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), randomIntBetween(0, 999_999_999)), randomZone()), + tv + ); + } + } + private void assertSerialization( CheckedConsumer outputAssertions, - CheckedConsumer inputAssertions + CheckedConsumer inputAssertions, + TransportVersion transportVersion ) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setTransportVersion(transportVersion); outputAssertions.accept(output); final StreamInput input = getStreamInput(output.bytes()); + input.setTransportVersion(transportVersion); inputAssertions.accept(input); } } private void assertGenericRoundtrip(Object original) throws IOException { + assertGenericRoundtrip(original, TransportVersion.current()); + } + + private void assertGenericRoundtrip(Object original, TransportVersion 
transportVersion) throws IOException { assertSerialization(output -> { output.writeGenericValue(original); }, input -> { Object read = input.readGenericValue(); assertThat(read, equalTo(original)); - }); + }, transportVersion); } } From e1170692fdb6ce11525dee11ee892e5b6b3a55a2 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 12:14:31 +1000 Subject: [PATCH 038/389] Mute org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} #111918 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 87f33b60c507b..9b81f35008f3b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -149,6 +149,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval: monthly #110916}" issue: https://github.com/elastic/elasticsearch/issues/111902 +- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} + issue: https://github.com/elastic/elasticsearch/issues/111918 # Examples: # From 2fe7077028758a9fd621a1485b094cf70771b1b6 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 12:14:56 +1000 Subject: [PATCH 039/389] Mute org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} #111919 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9b81f35008f3b..5e3dd4349daaf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -152,6 +152,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} issue: https://github.com/elastic/elasticsearch/issues/111918 +- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} + issue: https://github.com/elastic/elasticsearch/issues/111919 # Examples: # From c6bd898684a4f9ef88fb51c9d8636228685099a1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:32:23 +1000 Subject: [PATCH 040/389] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} #111919 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5e3dd4349daaf..5dc26b6bf03a6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -155,6 +155,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} issue: https://github.com/elastic/elasticsearch/issues/111919 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} + issue: https://github.com/elastic/elasticsearch/issues/111919 # Examples: # From 49b1b28681c568c12078418a1acf825a51aa2f0e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:32:32 +1000 Subject: [PATCH 041/389] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT test {date.testDateParseHaving} #111921 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) 
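Each of these bookkeeping commits appends one entry to `muted-tests.yml`. The schema they all follow is small; a representative entry (the class, method, and issue values below are placeholders, not a real mute) looks like:

[source,yaml]
--------------------------------------------------
tests:
- class: org.elasticsearch.example.ExampleIT   # fully-qualified test class
  method: test {example.testCase}              # optional; omit to mute the whole class
  issue: https://github.com/elastic/elasticsearch/issues/0
--------------------------------------------------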
diff --git a/muted-tests.yml b/muted-tests.yml index 5dc26b6bf03a6..5cae7dcae3b9a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -158,6 +158,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} issue: https://github.com/elastic/elasticsearch/issues/111919 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT + method: test {date.testDateParseHaving} + issue: https://github.com/elastic/elasticsearch/issues/111921 # Examples: # From afa788f7bbba86c2941f4c55cbbe28390fc339ff Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:32:40 +1000 Subject: [PATCH 042/389] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT test {datetime.testDateTimeParseHaving} #111922 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5cae7dcae3b9a..05820723e0794 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -161,6 +161,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT method: test {date.testDateParseHaving} issue: https://github.com/elastic/elasticsearch/issues/111921 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT + method: test {datetime.testDateTimeParseHaving} + issue: https://github.com/elastic/elasticsearch/issues/111922 # Examples: # From bbdf64d64ddfde6740cb260d0b3deda4712e293f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:32:48 +1000 Subject: [PATCH 043/389] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} #111918 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 05820723e0794..26629f56e2f2c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -164,6 +164,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT method: test {datetime.testDateTimeParseHaving} issue: https://github.com/elastic/elasticsearch/issues/111922 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} + issue: https://github.com/elastic/elasticsearch/issues/111918 # Examples: # From 5411befe4c94fa2ba662e2a8ff61f614a9c3a410 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 16:03:51 +1000 Subject: [PATCH 044/389] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT #111923 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 26629f56e2f2c..7100b01b5d51c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -167,6 +167,8 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} issue: https://github.com/elastic/elasticsearch/issues/111918 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT + issue: https://github.com/elastic/elasticsearch/issues/111923 # Examples: # From 23e659b804990dc8e4e92368f87593850e422c03 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 16:12:44 +1000 Subject: [PATCH 045/389] Mute 
org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT test {datetime.testDateTimeParseHaving} #111922 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 7100b01b5d51c..fe80545c05868 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -169,6 +169,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111918 - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT issue: https://github.com/elastic/elasticsearch/issues/111923 +- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT + method: test {datetime.testDateTimeParseHaving} + issue: https://github.com/elastic/elasticsearch/issues/111922 # Examples: # From 466760b196328940bfc44fbd3fc472b0bf86136c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 16:12:55 +1000 Subject: [PATCH 046/389] Mute org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} #111918 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index fe80545c05868..cb5320808d7b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -172,6 +172,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT method: test {datetime.testDateTimeParseHaving} issue: https://github.com/elastic/elasticsearch/issues/111922 +- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} + issue: https://github.com/elastic/elasticsearch/issues/111918 # Examples: # From c0d03e67e9ed9d31f586ef4e7612a82df3fb3cd0 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 16:13:04 +1000 Subject: [PATCH 047/389] Mute org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT test {date.testDateParseHaving} #111921 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cb5320808d7b6..c7c39b9ca9e60 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -175,6 +175,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} issue: https://github.com/elastic/elasticsearch/issues/111918 +- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT + method: test {date.testDateParseHaving} + issue: https://github.com/elastic/elasticsearch/issues/111921 # Examples: # From 504efd92f164f151cf4a86c6a66c9ac3ca477491 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 16:13:13 +1000 Subject: [PATCH 048/389] Mute org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} #111919 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c7c39b9ca9e60..76dc0da86de42 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -178,6 +178,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT method: test {date.testDateParseHaving} issue: https://github.com/elastic/elasticsearch/issues/111921 +- class: 
org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT + method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} + issue: https://github.com/elastic/elasticsearch/issues/111919 # Examples: # From 15890e1e6760264c9d0cf023836f52d999835281 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 15 Aug 2024 17:01:11 +1000 Subject: [PATCH 049/389] Mute org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT #111923 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 76dc0da86de42..4534d20d2f9ac 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -181,6 +181,8 @@ tests: - class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} issue: https://github.com/elastic/elasticsearch/issues/111919 +- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT + issue: https://github.com/elastic/elasticsearch/issues/111923 # Examples: # From 5934190539d5938e9a856cebcc55daee745b4565 Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Thu, 15 Aug 2024 17:02:43 +1000 Subject: [PATCH 050/389] Add additional BlobCacheMetrics, expose BlobCacheMetrics via SharedBlobCacheService (#111730) Relates: ES-9067 --- .../blobcache/BlobCacheMetrics.java | 106 +++++++++++++++++- .../blobcache/CachePopulationSource.java | 26 +++++ .../shared/SharedBlobCacheService.java | 4 + .../blobcache/BlobCacheMetricsTests.java | 80 +++++++++++++ 4 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/CachePopulationSource.java create mode 100644 x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index e92aa89022f35..075621e8cdccb 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -7,15 +7,43 @@ package org.elasticsearch.blobcache; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.DoubleHistogram; import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.metric.LongHistogram; import org.elasticsearch.telemetry.metric.MeterRegistry; +import java.util.Map; +import java.util.concurrent.TimeUnit; + public class BlobCacheMetrics { + private static final Logger logger = LogManager.getLogger(BlobCacheMetrics.class); + + private static final double BYTES_PER_NANOSECONDS_TO_MEBIBYTES_PER_SECOND = 1e9D / (1 << 20); + public static final String CACHE_POPULATION_REASON_ATTRIBUTE_KEY = "reason"; + public static final String CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY = "source"; + public static final String SHARD_ID_ATTRIBUTE_KEY = "shard_id"; + public static final String INDEX_ATTRIBUTE_KEY = "index_name"; + private final LongCounter cacheMissCounter; private final LongCounter evictedCountNonZeroFrequency; private final LongHistogram cacheMissLoadTimes; + private final DoubleHistogram 
cachePopulationThroughput;
+ private final LongCounter cachePopulationBytes;
+ private final LongCounter cachePopulationTime;
+
+ public enum CachePopulationReason {
+ /**
+ * When warming the cache
+ */
+ Warming,
+ /**
+ * When the data we need is not in the cache
+ */
+ CacheMiss
+ }

 public BlobCacheMetrics(MeterRegistry meterRegistry) {
 this(
@@ -33,14 +61,39 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) {
 "es.blob_cache.cache_miss_load_times.histogram",
 "The time in milliseconds for populating entries in the blob store resulting from a cache miss, expressed as a histogram.",
 "ms"
+ ),
+ meterRegistry.registerDoubleHistogram(
+ "es.blob_cache.population.throughput.histogram",
+ "The throughput observed when populating the cache",
+ "MiB/second"
+ ),
+ meterRegistry.registerLongCounter(
+ "es.blob_cache.population.bytes.total",
+ "The number of bytes that have been copied into the cache",
+ "bytes"
+ ),
+ meterRegistry.registerLongCounter(
+ "es.blob_cache.population.time.total",
+ "The time spent copying data into the cache",
+ "milliseconds"
 )
 );
 }

- BlobCacheMetrics(LongCounter cacheMissCounter, LongCounter evictedCountNonZeroFrequency, LongHistogram cacheMissLoadTimes) {
+ BlobCacheMetrics(
+ LongCounter cacheMissCounter,
+ LongCounter evictedCountNonZeroFrequency,
+ LongHistogram cacheMissLoadTimes,
+ DoubleHistogram cachePopulationThroughput,
+ LongCounter cachePopulationBytes,
+ LongCounter cachePopulationTime
+ ) {
 this.cacheMissCounter = cacheMissCounter;
 this.evictedCountNonZeroFrequency = evictedCountNonZeroFrequency;
 this.cacheMissLoadTimes = cacheMissLoadTimes;
+ this.cachePopulationThroughput = cachePopulationThroughput;
+ this.cachePopulationBytes = cachePopulationBytes;
+ this.cachePopulationTime = cachePopulationTime;
 }

 public static BlobCacheMetrics NOOP = new BlobCacheMetrics(TelemetryProvider.NOOP.getMeterRegistry());
@@ -56,4 +109,55 @@ public LongCounter getEvictedCountNonZeroFrequency() {
 public LongHistogram getCacheMissLoadTimes() {
 return cacheMissLoadTimes;
 }
+
+ /**
+ * Record the various cache population metrics after a chunk is copied to the cache
+ *
+ * @param bytesCopied The number of bytes copied
+ * @param copyTimeNanos The time taken to copy the bytes in nanoseconds
+ * @param index The index being loaded
+ * @param shardId The ID of the shard being loaded
+ * @param cachePopulationReason The reason for the cache being populated
+ * @param cachePopulationSource The source from which the data is being loaded
+ */
+ public void recordCachePopulationMetrics(
+ int bytesCopied,
+ long copyTimeNanos,
+ String index,
+ int shardId,
+ CachePopulationReason cachePopulationReason,
+ CachePopulationSource cachePopulationSource
+ ) {
+ Map<String, Object> metricAttributes = Map.of(
+ INDEX_ATTRIBUTE_KEY,
+ index,
+ SHARD_ID_ATTRIBUTE_KEY,
+ shardId,
+ CACHE_POPULATION_REASON_ATTRIBUTE_KEY,
+ cachePopulationReason.name(),
+ CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY,
+ cachePopulationSource.name()
+ );
+ assert bytesCopied > 0 : "We shouldn't be recording zero-sized copies";
+ cachePopulationBytes.incrementBy(bytesCopied, metricAttributes);
+
+ // This is almost certainly paranoid, but if we had a very fast/small copy with a very coarse nanosecond timer it might happen?
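+ // With doubles, a zero-nanosecond duration would yield an infinite throughput value, so the copy is logged and skipped instead.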
+ if (copyTimeNanos > 0) { + cachePopulationThroughput.record(toMebibytesPerSecond(bytesCopied, copyTimeNanos), metricAttributes); + cachePopulationTime.incrementBy(TimeUnit.NANOSECONDS.toMillis(copyTimeNanos), metricAttributes); + } else { + logger.warn("Zero-time copy being reported, ignoring"); + } + } + + /** + * Calculate throughput as MiB/second + * + * @param numberOfBytes The number of bytes transferred + * @param timeInNanoseconds The time taken to transfer in nanoseconds + * @return The throughput as MiB/second + */ + private double toMebibytesPerSecond(int numberOfBytes, long timeInNanoseconds) { + return ((double) numberOfBytes / timeInNanoseconds) * BYTES_PER_NANOSECONDS_TO_MEBIBYTES_PER_SECOND; + } } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/CachePopulationSource.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/CachePopulationSource.java new file mode 100644 index 0000000000000..8cf4b1b548f7d --- /dev/null +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/CachePopulationSource.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.blobcache; + +/** + * The places we populate the cache from + */ +public enum CachePopulationSource { + /** + * When loading data from the blob-store + */ + BlobStore, + /** + * When fetching data from a peer node + */ + Peer, + /** + * We cannot determine the source (should not be used except in exceptional cases) + */ + Unknown +} diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 3d95db72e269d..3242a02dff525 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -398,6 +398,10 @@ public static long calculateCacheSize(Settings settings, long totalFsSize) { .getBytes(); } + public BlobCacheMetrics getBlobCacheMetrics() { + return blobCacheMetrics; + } + public int getRangeSize() { return rangeSize; } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java new file mode 100644 index 0000000000000..ea9d0b7356f0e --- /dev/null +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.blobcache; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +public class BlobCacheMetricsTests extends ESTestCase { + + private RecordingMeterRegistry recordingMeterRegistry; + private BlobCacheMetrics metrics; + + @Before + public void createMetrics() { + recordingMeterRegistry = new RecordingMeterRegistry(); + metrics = new BlobCacheMetrics(recordingMeterRegistry); + } + + public void testRecordCachePopulationMetricsRecordsThroughput() { + int mebiBytesSent = randomIntBetween(1, 4); + int secondsTaken = randomIntBetween(1, 5); + String indexName = randomIdentifier(); + int shardId = randomIntBetween(0, 10); + BlobCacheMetrics.CachePopulationReason cachePopulationReason = randomFrom(BlobCacheMetrics.CachePopulationReason.values()); + CachePopulationSource cachePopulationSource = randomFrom(CachePopulationSource.values()); + metrics.recordCachePopulationMetrics( + Math.toIntExact(ByteSizeValue.ofMb(mebiBytesSent).getBytes()), + TimeUnit.SECONDS.toNanos(secondsTaken), + indexName, + shardId, + cachePopulationReason, + cachePopulationSource + ); + + // throughput histogram + Measurement throughputMeasurement = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_HISTOGRAM, "es.blob_cache.population.throughput.histogram") + .get(0); + assertEquals(throughputMeasurement.getDouble(), (double) mebiBytesSent / secondsTaken, 0.0); + assertExpectedAttributesPresent(throughputMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + + // bytes counter + Measurement totalBytesMeasurement = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.bytes.total") + .get(0); + assertEquals(totalBytesMeasurement.getLong(), ByteSizeValue.ofMb(mebiBytesSent).getBytes()); + assertExpectedAttributesPresent(totalBytesMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + + // time counter + Measurement totalTimeMeasurement = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.time.total") + .get(0); + assertEquals(totalTimeMeasurement.getLong(), TimeUnit.SECONDS.toMillis(secondsTaken)); + assertExpectedAttributesPresent(totalTimeMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + } + + private static void assertExpectedAttributesPresent( + Measurement measurement, + int shardId, + String indexName, + BlobCacheMetrics.CachePopulationReason cachePopulationReason, + CachePopulationSource cachePopulationSource + ) { + assertEquals(measurement.attributes().get(BlobCacheMetrics.SHARD_ID_ATTRIBUTE_KEY), shardId); + assertEquals(measurement.attributes().get(BlobCacheMetrics.INDEX_ATTRIBUTE_KEY), indexName); + assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_REASON_ATTRIBUTE_KEY), cachePopulationReason.name()); + assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY), cachePopulationSource.name()); + } +} From 1ba72e460215a1dec74e790cdf902a0f3b8526bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 15 Aug 2024 12:36:59 +0200 Subject: [PATCH 051/389] [DOCS] Documents output_field 
behavior after multiple inference runs (#111875)

Co-authored-by: David Kyle
---
 docs/reference/ingest/processors/inference.asciidoc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc
index 88d97d9422d5e..982da1fe17f7a 100644
--- a/docs/reference/ingest/processors/inference.asciidoc
+++ b/docs/reference/ingest/processors/inference.asciidoc
@@ -40,6 +40,11 @@ include::common-options.asciidoc[]
 Select the `content` field for inference and write the result to
 `content_embedding`.

+IMPORTANT: If the specified `output_field` already exists in the ingest document, it won't be overwritten.
+The {infer} results will be appended to the existing fields within `output_field`, which could lead to duplicate fields and potential errors.
+To avoid this, use a unique `output_field` field name that does not clash with any existing fields.
+
+
 [source,js]
 --------------------------------------------------
 {
From 6d8e6ad1b48717c74e99ad28dc19c0ebe0842526 Mon Sep 17 00:00:00 2001
From: Mark Tozzi
Date: Thu, 15 Aug 2024 08:50:31 -0400
Subject: [PATCH 052/389] [ESQL] date nanos binary comparisons (#111908)

resolves #109992

Nothing fancy here. Nanosecond dates are still longs, and we can just compare
them as longs. Please note that, as mentioned in the linked issue, this only
supports comparing date nanos to other date nanos, and not comparing to
millisecond dates. With the cast functions added in #111850, users can
explicitly cast to millisecond dates (or longs) to compare nanos to other
things.
---
 .../predicate/operator/comparison/Equals.java | 1 +
 .../operator/comparison/GreaterThan.java | 1 +
 .../operator/comparison/GreaterThanOrEqual.java | 1 +
 .../predicate/operator/comparison/LessThan.java | 1 +
 .../operator/comparison/LessThanOrEqual.java | 1 +
 .../operator/comparison/NotEquals.java | 1 +
 .../operator/comparison/EqualsTests.java | 17 +++++++++++++++--
 .../comparison/GreaterThanOrEqualTests.java | 17 +++++++++++++++--
 .../operator/comparison/GreaterThanTests.java | 17 +++++++++++++++--
 .../comparison/LessThanOrEqualTests.java | 17 +++++++++++++++--
 .../operator/comparison/LessThanTests.java | 17 +++++++++++++++--
 .../operator/comparison/NotEqualsTests.java | 17 +++++++++++++++--
 12 files changed, 96 insertions(+), 12 deletions(-)

diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
index 32e15deb07b4e..614d9aa3ec920 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java
@@ -35,6 +35,7 @@ public class Equals extends EsqlBinaryComparison implements Negatable parameters() {
 )
 );
 // Datetime
- // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long?
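+ // No cast is needed here: millisecond and nanosecond dates are both encoded as longs, so the long evaluator compares them directly.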
suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "EqualsLongsEvaluator", @@ -131,6 +130,20 @@ public static Iterable parameters() { ) ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "EqualsLongsEvaluator", + "lhs", + "rhs", + Object::equals, + DataType.BOOLEAN, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), + List.of(), + false + ) + ); + suppliers.addAll( TestCaseSupplier.stringCases( Object::equals, @@ -204,7 +217,7 @@ public static Iterable parameters() { } private static String typeErrorString = - "boolean, cartesian_point, cartesian_shape, datetime, double, geo_point, geo_shape, integer, ip, keyword, long, text, " + "boolean, cartesian_point, cartesian_shape, datetime, date_nanos, double, geo_point, geo_shape, integer, ip, keyword, long, text, " + "unsigned_long or version"; @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index 5435a7f629d43..a4d1bf69796e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -106,7 +106,6 @@ public static Iterable parameters() { ) ); // Datetime - // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long? suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "GreaterThanOrEqualLongsEvaluator", @@ -121,6 +120,20 @@ public static Iterable parameters() { ) ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "GreaterThanOrEqualLongsEvaluator", + "lhs", + "rhs", + (l, r) -> ((Number) l).longValue() >= ((Number) r).longValue(), + DataType.BOOLEAN, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), + List.of(), + false + ) + ); + suppliers.addAll( TestCaseSupplier.stringCases( (l, r) -> ((BytesRef) l).compareTo((BytesRef) r) >= 0, @@ -137,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index 75c22c34623b9..d3fede5c2e2ce 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -106,7 +106,6 @@ public static Iterable parameters() { ) ); // Datetime - // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long? 
suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "GreaterThanLongsEvaluator", @@ -121,6 +120,20 @@ public static Iterable parameters() { ) ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "GreaterThanLongsEvaluator", + "lhs", + "rhs", + (l, r) -> ((Number) l).longValue() > ((Number) r).longValue(), + DataType.BOOLEAN, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), + List.of(), + false + ) + ); + suppliers.addAll( TestCaseSupplier.stringCases( (l, r) -> ((BytesRef) l).compareTo((BytesRef) r) > 0, @@ -137,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index b65c6a753e14d..3b8270c1576fd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -106,7 +106,6 @@ public static Iterable parameters() { ) ); // Datetime - // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long? suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "LessThanOrEqualLongsEvaluator", @@ -121,6 +120,20 @@ public static Iterable parameters() { ) ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "LessThanOrEqualLongsEvaluator", + "lhs", + "rhs", + (l, r) -> ((Number) l).longValue() <= ((Number) r).longValue(), + DataType.BOOLEAN, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), + List.of(), + false + ) + ); + suppliers.addAll( TestCaseSupplier.stringCases( (l, r) -> ((BytesRef) l).compareTo((BytesRef) r) <= 0, @@ -137,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index 88c79d506e0c7..647988fe35326 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -106,7 +106,20 @@ public static Iterable parameters() { ) ); // Datetime - // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long? 
+ suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "LessThanLongsEvaluator", + "lhs", + "rhs", + (l, r) -> ((Number) l).longValue() < ((Number) r).longValue(), + DataType.BOOLEAN, + TestCaseSupplier.dateCases(), + TestCaseSupplier.dateCases(), + List.of(), + false + ) + ); + suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "LessThanLongsEvaluator", @@ -137,7 +150,7 @@ public static Iterable parameters() { o, v, t, - (l, p) -> "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + (l, p) -> "date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version" ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java index 06585f7c1a49d..53676a43b16a0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java @@ -115,7 +115,6 @@ public static Iterable parameters() { ) ); // Datetime - // TODO: I'm surprised this passes. Shouldn't there be a cast from DateTime to Long? suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( "NotEqualsLongsEvaluator", @@ -129,6 +128,20 @@ public static Iterable parameters() { false ) ); + // Datetime + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "NotEqualsLongsEvaluator", + "lhs", + "rhs", + (l, r) -> false == l.equals(r), + DataType.BOOLEAN, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), + List.of(), + false + ) + ); suppliers.addAll( TestCaseSupplier.stringCases( (l, r) -> false == l.equals(r), @@ -198,7 +211,7 @@ public static Iterable parameters() { } private static String typeErrorString = - "boolean, cartesian_point, cartesian_shape, datetime, double, geo_point, geo_shape, integer, ip, keyword, long, text, " + "boolean, cartesian_point, cartesian_shape, datetime, date_nanos, double, geo_point, geo_shape, integer, ip, keyword, long, text, " + "unsigned_long or version"; @Override From 13c9030c4fd6354b1fb44da6e33efba111c43df2 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 16 Aug 2024 00:41:09 +1000 Subject: [PATCH 053/389] Mute org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT #111923 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4534d20d2f9ac..996f1e699c403 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -183,6 +183,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111919 - class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT issue: https://github.com/elastic/elasticsearch/issues/111923 +- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT + issue: https://github.com/elastic/elasticsearch/issues/111923 # Examples: # From 5510ad98f84f65eb87ebeaf630487bbdb39a1c4f Mon Sep 17 00:00:00 2001 From: john-wagster Date: Thu, 15 Aug 2024 09:55:52 -0500 Subject: [PATCH 054/389] Updated Function Score Query Test with Explain Fixes for 8.15.1 (#111929) * updated test for 8.15.1 * Update docs/changelog/111929.yaml * Delete docs/changelog/111929.yaml --- 
.../rest-api-spec/test/script_expert_scoring/20_score.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml index 8f0b670ef03e3..7436768416e00 100644 --- a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml +++ b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml @@ -55,7 +55,7 @@ setup: "document scoring with custom explanation": - requires: - cluster_features: [ "gte_v8.16.0" ] + cluster_features: [ "gte_v8.15.1" ] reason: "bug fixed where explanations were throwing npe prior to 8.16" - do: From 2261883fa18cd6cefbc951076c5d05773625e9ea Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Thu, 15 Aug 2024 12:29:42 -0400 Subject: [PATCH 055/389] Update geoip fixture files and utility methods (#111913) --- .../ingest/geoip/AbstractGeoIpIT.java | 25 +----- .../ingest/geoip/GeoIpDownloaderIT.java | 8 +- ...gDatabasesWhilePerformingGeoLookupsIT.java | 23 +++--- .../ingest/geoip/ConfigDatabasesTests.java | 30 +++---- .../geoip/DatabaseNodeServiceTests.java | 5 +- .../geoip/GeoIpProcessorFactoryTests.java | 73 ++++++------------ .../ingest/geoip/GeoIpProcessorTests.java | 8 +- .../ingest/geoip/GeoIpTestUtils.java | 60 ++++++++++++++ .../resources/GeoIP2-Anonymous-IP-Test.mmdb | Bin 4374 -> 4668 bytes .../src/test/resources/GeoIP2-City-Test.mmdb | Bin 20996 -> 22451 bytes .../GeoIP2-Connection-Type-Test.mmdb | Bin 4537 -> 4537 bytes .../test/resources/GeoIP2-Domain-Test.mmdb | Bin 6449 -> 6449 bytes .../resources/GeoIP2-Enterprise-Test.mmdb | Bin 9901 -> 9901 bytes .../src/test/resources/GeoIP2-ISP-Test.mmdb | Bin 75076 -> 75076 bytes .../test/resources/GeoLite2-City-Test.mmdb | Bin 20809 -> 21117 bytes 15 files changed, 117 insertions(+), 115 deletions(-) create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java index ae811db226b06..92ec911dbf451 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java @@ -16,17 +16,14 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.StreamsUtils; import org.junit.ClassRule; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.Collection; import java.util.List; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; + public abstract class AbstractGeoIpIT extends ESIntegTestCase { private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false; @@ -45,23 +42,7 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(final int nodeOrdinal, final Settings otherSettings) { final Path databasePath = createTempDir(); 
- try { - Files.createDirectories(databasePath); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), - databasePath.resolve("GeoLite2-City.mmdb") - ); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), - databasePath.resolve("GeoLite2-Country.mmdb") - ); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), - databasePath.resolve("GeoLite2-ASN.mmdb") - ); - } catch (final IOException e) { - throw new UncheckedIOException(e); - } + copyDefaultDatabases(databasePath); return Settings.builder() .put("ingest.geoip.database_path", databasePath) .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), false) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index f7ab384c69bf1..d994bd70eb7a0 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -66,6 +66,7 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; @@ -688,12 +689,7 @@ private void setupDatabasesInConfigDirectory() throws Exception { .forEach(path -> { try { Files.createDirectories(path); - Files.copy(GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-City.mmdb"), path.resolve("GeoLite2-City.mmdb")); - Files.copy(GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-ASN.mmdb"), path.resolve("GeoLite2-ASN.mmdb")); - Files.copy( - GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-Country.mmdb"), - path.resolve("GeoLite2-Country.mmdb") - ); + copyDefaultDatabases(path); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java index 8d8b0b4215b3f..87daefab7b428 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java @@ -22,10 +22,8 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; -import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -34,7 +32,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.ingest.geoip.GeoIpProcessorFactoryTests.copyDatabaseFiles; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static 
org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -68,8 +67,8 @@ public void test() throws Exception { when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); DatabaseNodeService databaseNodeService = createRegistry(geoIpConfigDir, geoIpTmpDir, clusterService); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); lazyLoadReaders(databaseNodeService); @@ -138,18 +137,14 @@ public void test() throws Exception { assertThat(previous1.current(), equalTo(-1)); }); } else { - Files.copy( - ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - geoIpTmpDir.resolve("GeoLite2-City.mmdb"), - StandardCopyOption.REPLACE_EXISTING - ); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); } DatabaseReaderLazyLoader previous2 = databaseNodeService.get("GeoLite2-City-Test.mmdb"); - InputStream source = ConfigDatabases.class.getResourceAsStream( - i % 2 == 0 ? "/GeoIP2-City-Test.mmdb" : "/GeoLite2-City-Test.mmdb" + copyDatabase( + i % 2 == 0 ? 
"GeoIP2-City-Test.mmdb" : "GeoLite2-City-Test.mmdb", + geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb") ); - Files.copy(source, geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"), StandardCopyOption.REPLACE_EXISTING); databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); DatabaseReaderLazyLoader current1 = databaseNodeService.get("GeoLite2-City.mmdb"); @@ -194,7 +189,7 @@ private static DatabaseNodeService createRegistry(Path geoIpConfigDir, Path geoI throws IOException { GeoIpCache cache = new GeoIpCache(0); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); DatabaseNodeService databaseNodeService = new DatabaseNodeService( geoIpTmpDir, mock(Client.class), diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java index 01d7cdc9b9d5c..7b962fed0ca83 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java @@ -20,12 +20,11 @@ import org.junit.After; import org.junit.Before; -import java.io.IOException; -import java.nio.file.CopyOption; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -62,8 +61,8 @@ public void testLocalDatabasesEmptyConfig() throws Exception { public void testDatabasesConfigDir() throws Exception { Path configDir = createTempDir(); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoIP2-City-Test.mmdb"), configDir.resolve("GeoIP2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), configDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoIP2-City-Test.mmdb", configDir.resolve("GeoIP2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); ConfigDatabases configDatabases = new ConfigDatabases(configDir, new GeoIpCache(0)); configDatabases.initialize(resourceWatcherService); @@ -92,9 +91,9 @@ public void testDatabasesDynamicUpdateConfigDir() throws Exception { assertThat(loader.getDatabaseType(), equalTo("GeoLite2-Country")); } - CopyOption option = StandardCopyOption.REPLACE_EXISTING; - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoIP2-City-Test.mmdb"), configDir.resolve("GeoIP2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), configDir.resolve("GeoLite2-City.mmdb"), option); + copyDatabase("GeoIP2-City-Test.mmdb", configDir.resolve("GeoIP2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); + assertBusy(() -> { assertThat(configDatabases.getConfigDatabases().size(), equalTo(4)); DatabaseReaderLazyLoader loader = configDatabases.getDatabase("GeoLite2-ASN.mmdb"); @@ -116,7 +115,8 @@ public void testDatabasesDynamicUpdateConfigDir() throws Exception { public void testDatabasesUpdateExistingConfDatabase() throws Exception { Path configDir = createTempDir(); 
- Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City.mmdb"), configDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoLite2-City.mmdb", configDir); + GeoIpCache cache = new GeoIpCache(1000); // real cache to test purging of entries upon a reload ConfigDatabases configDatabases = new ConfigDatabases(configDir, cache); configDatabases.initialize(resourceWatcherService); @@ -131,11 +131,7 @@ public void testDatabasesUpdateExistingConfDatabase() throws Exception { assertThat(cache.count(), equalTo(1)); } - Files.copy( - ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - configDir.resolve("GeoLite2-City.mmdb"), - StandardCopyOption.REPLACE_EXISTING - ); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); assertBusy(() -> { assertThat(configDatabases.getConfigDatabases().size(), equalTo(1)); assertThat(cache.count(), equalTo(0)); @@ -154,11 +150,9 @@ public void testDatabasesUpdateExistingConfDatabase() throws Exception { }); } - private static Path prepareConfigDir() throws IOException { + private static Path prepareConfigDir() { Path dir = createTempDir(); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-ASN.mmdb"), dir.resolve("GeoLite2-ASN.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City.mmdb"), dir.resolve("GeoLite2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-Country.mmdb"), dir.resolve("GeoLite2-Country.mmdb")); + copyDefaultDatabases(dir); return dir; } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index 34d5429142cec..1579c7020c58a 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -83,7 +83,7 @@ import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; -import static org.elasticsearch.ingest.geoip.GeoIpProcessorFactoryTests.copyDatabaseFiles; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.TYPE; import static org.hamcrest.Matchers.empty; @@ -117,10 +117,9 @@ public class DatabaseNodeServiceTests extends ESTestCase { @Before public void setup() throws IOException { final Path geoIpConfigDir = createTempDir(); - Files.createDirectories(geoIpConfigDir); GeoIpCache cache = new GeoIpCache(1000); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); threadPool = new TestThreadPool(ConfigDatabases.class.getSimpleName()); Settings settings = Settings.builder().put("resource.reload.interval.high", TimeValue.timeValueMillis(100)).build(); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 663ae1152246a..a0541df0d4d8a 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ 
b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -25,18 +25,15 @@ import org.elasticsearch.ingest.geoip.Database.Property; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.junit.After; import org.junit.Before; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -45,6 +42,9 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.DEFAULT_DATABASES; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -57,8 +57,6 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { - static Set DEFAULT_DATABASE_FILENAMES = Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"); - private Path geoipTmpDir; private Path geoIpConfigDir; private ConfigDatabases configDatabases; @@ -74,7 +72,7 @@ public void loadDatabaseReaders() throws IOException { Client client = mock(Client.class); GeoIpCache cache = new GeoIpCache(1000); configDatabases = new ConfigDatabases(geoIpConfigDir, new GeoIpCache(1000)); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); geoipTmpDir = createTempDir(); clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); @@ -181,7 +179,7 @@ public void testBuildDbFile() throws Exception { assertFalse(processor.isIgnoreMissing()); } - public void testBuildWithCountryDbAndAsnFields() throws Exception { + public void testBuildWithCountryDbAndAsnFields() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -201,7 +199,7 @@ public void testBuildWithCountryDbAndAsnFields() throws Exception { ); } - public void testBuildWithAsnDbAndCityFields() throws Exception { + public void testBuildWithAsnDbAndCityFields() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -218,10 +216,7 @@ public void testBuildWithAsnDbAndCityFields() throws Exception { } public void testBuildNonExistingDbFile() throws Exception { - Files.copy( - GeoIpProcessorFactoryTests.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - geoipTmpDir.resolve("GeoLite2-City.mmdb") - ); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City.mmdb")); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); @@ -234,11 +229,11 @@ public void testBuildNonExistingDbFile() throws Exception { public void testBuildBuiltinDatabaseMissing() throws Exception { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - 
cleanDatabaseFiles(geoIpConfigDir, configDatabases); + cleanDatabases(geoIpConfigDir, configDatabases); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("database_file", randomFrom(DEFAULT_DATABASE_FILENAMES)); + config.put("database_file", randomFrom(DEFAULT_DATABASES)); Processor processor = factory.create(null, null, null, config); assertThat(processor, instanceOf(GeoIpProcessor.DatabaseUnavailableProcessor.class)); } @@ -267,7 +262,7 @@ public void testBuildFields() throws Exception { assertFalse(processor.isIgnoreMissing()); } - public void testBuildIllegalFieldOption() throws Exception { + public void testBuildIllegalFieldOption() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config1 = new HashMap<>(); @@ -324,14 +319,13 @@ public void testBuildNullDatabase() throws Exception { assertThat(e.getMessage(), equalTo("[database_file] Unsupported database type [null] for file [GeoLite2-City.mmdb]")); } - @SuppressWarnings("HiddenField") public void testLazyLoading() throws Exception { final Path configDir = createTempDir(); final Path geoIpConfigDir = configDir.resolve("ingest-geoip"); Files.createDirectories(geoIpConfigDir); GeoIpCache cache = new GeoIpCache(1000); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); // Loading another database reader instances, because otherwise we can't test lazy loading as the // database readers used at class level are reused between tests. (we want to keep that otherwise running this @@ -358,7 +352,7 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-City.mmdb"); final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load @@ -369,7 +363,7 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-Country.mmdb"); final GeoIpProcessor country = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-Country.mmdb").databaseReader.get()); country.execute(document); // the first ingest should trigger a database load @@ -380,22 +374,21 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-ASN.mmdb"); final GeoIpProcessor asn = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); asn.execute(document); // the first ingest should trigger a database load assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); } - @SuppressWarnings("HiddenField") public void testLoadingCustomDatabase() throws IOException { final Path configDir = createTempDir(); final Path geoIpConfigDir = configDir.resolve("ingest-geoip"); 
Files.createDirectories(geoIpConfigDir); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, new GeoIpCache(1000)); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); // fake the GeoIP2-City database - copyDatabaseFile(geoIpConfigDir, "GeoLite2-City.mmdb"); + copyDatabase("GeoLite2-City.mmdb", geoIpConfigDir); Files.move(geoIpConfigDir.resolve("GeoLite2-City.mmdb"), geoIpConfigDir.resolve("GeoIP2-City.mmdb")); /* @@ -428,7 +421,7 @@ public void testLoadingCustomDatabase() throws IOException { config.put("database_file", "GeoIP2-City.mmdb"); final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load @@ -490,7 +483,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { assertThat(geoData.get("city_name"), equalTo("Tumba")); } { - copyDatabaseFile(geoipTmpDir, "GeoLite2-City-Test.mmdb"); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); processor.execute(ingestDocument); @@ -498,7 +491,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { assertThat(geoData.get("city_name"), equalTo("Linköping")); } { - // No databases are available, so assume that databases still need to be downloaded and therefor not fail: + // No databases are available, so assume that databases still need to be downloaded and therefore not fail: IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); databaseNodeService.removeStaleEntries(List.of("GeoLite2-City.mmdb")); configDatabases.updateDatabase(geoIpConfigDir.resolve("GeoLite2-City.mmdb"), false); @@ -507,7 +500,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { assertThat(geoData, nullValue()); } { - // There are database available, but not the right one, so tag: + // There are databases available, but not the right one, so tag: databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); processor.execute(ingestDocument); @@ -517,7 +510,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { public void testDatabaseNotReadyYet() throws Exception { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - cleanDatabaseFiles(geoIpConfigDir, configDatabases); + cleanDatabases(geoIpConfigDir, configDatabases); { Map config = new HashMap<>(); @@ -542,7 +535,7 @@ public void testDatabaseNotReadyYet() throws Exception { ); } - copyDatabaseFile(geoipTmpDir, "GeoLite2-City-Test.mmdb"); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); { @@ -562,25 +555,9 @@ public void testDatabaseNotReadyYet() throws Exception { } } - private static void copyDatabaseFile(final Path path, final String databaseFilename) throws IOException { - Files.copy( - new 
ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/" + databaseFilename)), - path.resolve(databaseFilename), - StandardCopyOption.REPLACE_EXISTING - ); - } - - static void copyDatabaseFiles(final Path path, ConfigDatabases configDatabases) throws IOException { - for (final String databaseFilename : DEFAULT_DATABASE_FILENAMES) { - copyDatabaseFile(path, databaseFilename); - configDatabases.updateDatabase(path.resolve(databaseFilename), true); + private static void cleanDatabases(final Path directory, ConfigDatabases configDatabases) { + for (final String database : DEFAULT_DATABASES) { + configDatabases.updateDatabase(directory.resolve(database), false); } } - - static void cleanDatabaseFiles(final Path path, ConfigDatabases configDatabases) throws IOException { - for (final String databaseFilename : DEFAULT_DATABASE_FILENAMES) { - configDatabases.updateDatabase(path.resolve(databaseFilename), false); - } - } - } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 87d1881a9e743..762818a7c65db 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -134,7 +134,7 @@ public void testNonExistentWithIgnoreMissing() throws Exception { assertIngestDocument(originalIngestDocument, ingestDocument); } - public void testNullWithoutIgnoreMissing() throws Exception { + public void testNullWithoutIgnoreMissing() { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -156,7 +156,7 @@ public void testNullWithoutIgnoreMissing() throws Exception { assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot extract geoip information.")); } - public void testNonExistentWithoutIgnoreMissing() throws Exception { + public void testNonExistentWithoutIgnoreMissing() { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -526,7 +526,7 @@ public void testAddressIsNotInTheDatabase() throws Exception { /** * Don't silently do DNS lookups or anything trappy on bogus data */ - public void testInvalid() throws Exception { + public void testInvalid() { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -803,7 +803,7 @@ long databaseFileSize() throws IOException { } @Override - InputStream databaseInputStream() throws IOException { + InputStream databaseInputStream() { return databaseInputStreamSupplier.get(); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java new file mode 100644 index 0000000000000..a3d72aca2295c --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.core.SuppressForbidden; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Set; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +public final class GeoIpTestUtils { + + private GeoIpTestUtils() { + // utility class + } + + public static final Set DEFAULT_DATABASES = Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"); + + @SuppressForbidden(reason = "uses java.io.File") + private static boolean isDirectory(final Path path) { + return path.toFile().isDirectory(); + } + + public static void copyDatabase(final String databaseName, final Path destination) { + try (InputStream is = GeoIpTestUtils.class.getResourceAsStream("/" + databaseName)) { + if (is == null) { + throw new FileNotFoundException("Resource [" + databaseName + "] not found in classpath"); + } + + Files.copy(is, isDirectory(destination) ? destination.resolve(databaseName) : destination, REPLACE_EXISTING); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public static void copyDefaultDatabases(final Path directory) { + for (final String database : DEFAULT_DATABASES) { + copyDatabase(database, directory); + } + } + + public static void copyDefaultDatabases(final Path directory, ConfigDatabases configDatabases) { + for (final String database : DEFAULT_DATABASES) { + copyDatabase(database, directory); + configDatabases.updateDatabase(directory.resolve(database), true); + } + } +} diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb index 17fc3715090ae7ece44394b89acb6e03c58d9356..1b142d0001b9c6836fae618a0cb2c86a423cea83 100644 GIT binary patch literal 4668 zcmZA32Y3`k6u|K}OW^3zds%u7CG_6GBm|aa5iufWn~0;Kv(Dn-Ju8cgkI1aT<8ORp&#^z0Wc5-!C)8y zLtz*Uhg=u|BViPbhCCPpV__VOhY2tdCc$Kw0#jicOotgT6K26|m;-ZR9?XXYus7sG z0TjYQD1t?>7>Z#Zef&DbzC=r4DeMRPgT5sGE42&`go9u?91JVq5D3Fch=|D3A|=X6 zBBdmw;A!%Bkusuki=!8kRU&$2|LUtXN{Xx@s(?y}Tl_9bh@+kQqm9-=3eu1fsbXAp z(0M4e8j;!{xlZJ;AUa%ReGnZXvcX1jBpd}t!!d9y9B1*D#$5uJ!ewx| z#qQu04BseH7bLGFc@tT;2-6*n&=W=LJZ15p(=$5R zwB__7%ymMZU4gMj?yrQgoNT zBl=!{zl;<4LFp~>BgvoOXH9ky`9*0i@+-;R@EiONf54yc7yJ$XSd4DG?uJo)9B8Cd zHxrXX)EJro%Qa1HF%1AeqKx~F|8T57qo%4L1#NU+l%Qyts`^_S~}Yn zy^84?B)f^}9z;FF^kiHwlJzU|7vYNOL#;3LgZ^3@%+&{o8Ax)F#b3N$&)&ljF+-^h z%ht?rYPn)Y1j&&kM~NBDfV`kKM$A|uR%XTrwF!)yC}tAlCc_k%3e#XZ%z&9N%VL*1 zo5|+DTru-#nGXxHoelSwPiFxX!a^v5MX(r(VISBRmcUZj5B9gnN-@j0di}j0M6?_Z zh81uKgkdE_K+iwDM^g$>@SqIJLC-$F(|?T0YFGoDf2NWs4hcxYT1Y_}GEfE8a46J3 zEv$pX;BZ(EN5BR+5{?4SKXVKb=bt%_=y*5*PK1--WH<#l|IBGbr(5)4+nqQQ&VsXn z^Us`1bRL`!7r=#Z5nK$H=m)gbpH|GJVlE?jIa~o7p-x-g7IUR?lbEYWUajaieGOa- z*TMC01KbFkEc!axaW^aaY;UpHMcgXpHj=l)9l-f#?jpJy?ty#ZKDZwqfCu3rco-gm zN8vGxz53(q1m~Z5Qp{69?P)R3FyL8u4xWb>;6-=|Hp9#C3cL!h!Rzn_ya{hv>dzIs zvUkM1Ym+v5k9EE;=7S*lA<2*6V==l;5q=74Ka2T=Bsg-`^GU@;V1_V~CQ&Oe9q&*A)Y^!)Qb89o0F zQ20Z_Sw=FZ@7#a>ZP~UkmJUbaiFj>gqAG1d#e0U~>SWx6w!4lAxoxRs5~;9P6U&6- ziKu5n+iWej{)Z(xcvT{uiN(vq$yA~`7WGmlw4-6XyU2tVYfG}Kq#{=8chuCH&{EB$ zytMYlGqFfTL)vYa)Lm;rn>Oz%iqsUv;?WVMiOQYJOJeazsy1AfNL5BM;mXKrT~)Q0 zO2-oMI_Lk@SUjk1EG(&tRYb#HGEurJ$0@5Dw!0`A$wW#bX)m0qO?pcfc!~Vtyxdt0 zTaue!Tp0DzrKwmlqtkDl?ZsE*(d1^E-27s9sh7^UbL;1F2bV?Gcy4{4e}&<$uGaNx zB9+Ms&rMdPl8LmJb`$Z6+93t8WOn&Y@+%_o@~TL=mo_=>`T~ES!=;I;c;*S`z`~SQ Onn*>%>DW52uE)Rf@u~Fy literal 4374 
zcmZA31$b3u9LMqBdl+20I}e>h#coP*jKLI4#LM08VO-emx!ij>YhE%hkv|rYA=$J%)OFhfA^G{+2~BEh^RTVfEcv2EeA@ih+0D% zXbbJ2J#>JM&cO;V=S5!YCLG zW1twu!d9>~Yy;z9Ti6b^haF%?*a>!qU0_$(4R(h;z&@tnQH_TQFcBufWY`P#hAFTQ zl)zLdg=tU*(_sdb!@lec_OA_q!AUGHff!S~<%z?uo4s#(P=}D== zahr|qHyENLJsm$E#vB< z&cmtIOAS$Sv2;Wf9Vsn|qNAjv!$`-#v2Yw54=2EhA;AV6d*Mq9k(MdA%%E_Jr2C~) zE!Rk=kv!c-=SgQo1J0DrqINc%1LxAhmcl!6K38-BTqs>ci(N|>OP4V2Qdka`!Q~<0 zELSjmg|sqCUP*ElTm@H$ILoAKndLgTJ{os}w3_6Na1-1Nx4^9-;p(?Zx0AdhO5Q2m zMe=UAC#u~m-B*a5Rni0YY9EsxEF|@i^f2Qdfkz89`z*p`*TCcO1Q%wX(^Gb`uSq^F zJ!7MHiJq07v(f9)^M%Ru0=2d9BD@4I!z=JAyjFM_-e6~M!dvh*yb}`avuV5UN$(eu z`at@S&W~UntdCkYu--=a7(S6UMJ=C_{0ufrpGUPXNPa1OMba)A>}QL`-lcDdzO{c} zwwJ!M43NGj`2+lDlRc%MEUl%VN&W)A!f)_9`~iQ$U+{N`u{YkXZma}o#jav{MakYW zeWIwZOh3l;Cu#4cy`tdG8dqi@wLvf#hS=H&T87FDBRM=I7-!cDhmVvQMQwDUX2wt} zmKhr*8wYGHvke2rMYU~Zwj*L^W{0S@Ba`hUvoqs%fn8xY*d6wOV1xFFjfV*#;dUo7 z*(8`OvllIU!<0g2)Ag0mITcD_8kE6wm;vRmFYE_1VHWHU2ZU&@%t5SvFdPE2;ZT?Z zhd~_XLIUjh7hH5DB*BAuPzCdW^Uo|GS_qtfhV##)N&1k1MUaIY%;7j)J4%7&sO<|IG13oPXv-qLW}LEQ6Eb6gU+)|BOBVf}3?lNYf{J7Rj^W9N_#j z=MkL`7r=#Z5nK$Hz@_#Bx`!vdT;?*8m%|mX0#@428)dE}S_M~G>^FTiTm#p_b#Oi0 z0INgn>s0tQZ<4v$MvWi(E$ro1ncJe|?IiDjJ7w+)YvyiR_!`4G?ajK+CWq5`zsv*C zEDy>&6h#ly@(4T%kIAeFTg>B8C+DAeQs${B`LxV4bUq8uMYZQ;ULd&^UWAw6Wq1W% zh1cM9cmv)9&Oh@u(L3-iya(^Y2k;?$1nXcuY=DjMF?<4>;8XYvHpA!e1=zDMcriKu z%-2L);2ZcBz5~ub^8?Y3@Dp(UnO}&0h2P+J_yhigzu@l>M_|B)!MoH9il8~PfEcud zR?r&SKwD@B?V$s7gig>IxV`?GGtv{WvhNc-uA8oxGYV&(r1<8_&| ziEVTp6LULJ%lp~5SD(tq(|*!3u?=CZxbYt|?cn)-E}u$Q#WPvIE|v7MCbp?*ygS{* zX4sZYZAEpeGU%vpFtJ%Sll5}8H=R!Yq6I;D@OIf16ER{}_XmU%+-C15P?@n%< z%N;Qna*QWDpoUKZ;UZtN+#&fB~-paoJ04ww}Bme*a diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb index 7ed43d616a85d3975a12c8c4203249e0f0d888ef..04220ff4b6411ae254f8167ad696c88f3bf89b9f 100644 GIT binary patch literal 22451 zcmZ{q2VfM{7WeP&&VYibD4-yb33Wj!f(5LkZZ-)?Od+VjxFk!mB-xEyghcc~f=cg2 z1cA_tNa(%j+Uv&dYl>+^ed>EQd_ML2-*aZR_fUj6_BfBbkxHNM+aWQ265hj=snYxB?Eu(k% z3CN?TUnfj`IWLZJeYl9JpD<}GjA!&`42TpN$afheOoKUZ2*VmFGL(x9BV1l8OgD0o z;i%^_VHyE=g=r*EBut|??2Wy!+30G;SR=5#xBNg#vTUyKRMu6wF=YoEZj$sLQ^BRXg}kC zFdgI~?Ek39Va_|kILbICuKI~fo?x70oFb6kX<<4ONu3p@b6n&F#*2&~<0Zz+1X6!R zm|l&fUK1wveCc_mD81=em5sXlAr9S{ZGOcE$xp2jfe|SB$S2|6+W@_?E%`&jjCx z-Y~KMGkwn~_J5`yIi(L3`#;k~7XHHcFXLCn{}{h9erNnaFbjr>fg~PlwlEZi%D~K$ zd7T-TFfL_uVRU6&#^}bloN)!?N=A3aRg9|{Js3S1y%_BOyCy zGU6E5Gx{+!Mm(cGV*q0y?(&AbtuPM~=D{o+!mu)iGKQhZSz*2b*e1+30=EeBaLyaS z7|CG&XTFJ3qZwlu?El0=oVt|(|7Ugy^H?BVn8yKkbCqsq+#$?&a>=_0J(qFn9+XTH z=JA|LU?dWHp?hR!C8LNc#0X2EFjbgK zS;0v#8ePW1@)(6_P?#so6+o>pR{}m^t^%rs+06=`n7cg8LN5#T?(sumx-bVg?~xdV z87!!I+ZFwfz)|5uPWohjD=aj>EzIw5$#)qcMib*b#`}y92vnsHh54gMiv6GY6Jh=o zhkO4P=D%Wact2)l|7T|ZXJ-Fr{--d1h9b>e-Oq*jJU`(J!nF)@3yQQ0b1Uby5qe`P zq6X#*j1EGdA)NXOPxlJ**T4%Id~JVW6;F55x;gKNf0)_zF(3|FaCBls+n! zK~S&?%U~|TLuK4^C<})X;?Z^TK{rAnTUdqzIvWd7PI|FxoJY zg@pv;p7%lFAz`^6m@F*%z!gyuqe>65UNOPAnS+H>2m^L-suXvr5El49OS!O2Wnme? z&~veH8llfaoT`L^M_8&jL1b{ih@(A#luuKPPg=Gfo z)er`9JuQ#2a3`+VR;gmCoE5K5%zz^T$<0q z1%!bsIJF20ONC`Ir$f;&1 zd?_p~K)bNCvapR{tRwb+mJSjcJ@^$Az7dwMS>ayKW7nnTdCt>*>z#97j zxJ_7o1bPU|Pk<^cKjWUi3(G~|S7G^u@A6-Qv3&o>!rur(^df&i-7H`6$@QR^2v#0c z#lk{lBCiiy4W$zlx(TH-&_yWh|CCFqh~5UJE9+fG(4DDrITX4Jh3%(uWsJgAtZ+3! 
zubI*l3Ra=?0^)^o4RF0st_7|WN^e%^L(mtO(wBvCgrWJI>c`Ls`f^hG<1T}QGJx|2 z#^eoV;Sd(uI5iXsql7XH7%r3>Sa>5r_tVM<7LFw7HcPn)3O5U7H0O;W=y8E^3kz>0 z=wqaeg+ifF#sO(UxgAIl${oPnLb;O_?jq<5LAi&8;|WIlVBM4yp(Jr$GAr;RQBqmR z4Kw=M4uu?{qyrO#lEFnX2}aGbSeQ*1#+6X+g+jhiayc)LV6?q}g%b(7T~a1Np+YG4 z0i{B@A8-ie0pLNQ6tTi&g7Mg57V_jX?lOgiB{3zPxJ$WErgB~xVc2J!axtb6jJs4q z!6%d|pjs$yF5)5Fz=l(Kn1x<~(Yt;qOczRk^By7SOIw-2!Wx1x^&f@8QK8HPo)*eu zz&xSA|0z!h;P-d~-Y=ZHidKS(h7`1+qg-g)mlvV%vQUDY z_Yy&mKb2Ql_$p!eJDhqQ3L&Aq0lY1gH(B@=!B|du$#+R;Ot&T|d?1wfSmAwwF?&8_ z;YS3+9DV|YcAWbK%9pJ76&KMbE&D&^n;7ADQ21Ub|7L~%5R68{|H+yBV~hg)pZd5^ zE&_K5|0L(05X*pp?q~@47LZvOuqkP!*uFP*Dbt zRXY*%UQ*frsh6^l@2Pg>l9v&56Qo{_yYv+56~I+Oy^@P`kI}oDg*^!RFsi+v&|9eR zfAZ_51fxoEP`FX3*8_uu+7IY2RECKHTXr$WIlR2%1|5sZ4KvoM38 zZ}rp(P?#jtEZ|iwMe0AVB+r5xhP zj0Xw&;!+=if=8$hpj@c%f2vccC0w#JrsPx>mcpy11x-mV2sfWC_F0E8qTXF=(a$e$->8C6#fE*GeVsOY!vEjV5v~+ zfO$f#2c8t_9M*e+Zx-UzQ;fL;qnGAGVUbW5a31?VW4bM7;Sz#T-DOaCTByr`RYG0C zMcDrt>tQtu*AVoR40SCO)(iC+&Ra(?2K-qTZio?Xg2FzbHUK+?x*6Ce)Gb_cD?zu= z>UI|L6xL0ix(f==33WH;?I9Qq+sne|2}Z*jp>R~F`+-A3J-|f{#^@bp;SqvS`D0Kx zDb(YfcY+o4wQ`DurwPV`&O+gzLOln(E7TW&*M#~a@Ul>Ytnd=Sm<6w}@KwSn48NT7 zuS4N2p}xVXHwng~e4B;u5R7_a7N{Q!wF&q@sPA!+_X$S7e8|F&Sg5z*6Da&msQOa- zE9DuL_&Y27gP?CJ)X$*sjZi-a+J$-^Xc6icT(X&9Og{F1Y8wfSdR~CSS3>Pzg)a&E z20;Cqh5w3C_!bI33H3YRd!hcDi~J`>kNuzeBMEOB%0+&L!heN&kyF30LN`vq|H)4N zAK@mn2r2w8)ZZC@5Fn7kP9}z#VG*5_utF!5g(w#L*Opx3^4ENkUgq?;9aYX^S832% zpOPZ4ediOMZd;;f##E2D)L!Co`(19Q+uxjA;wg0|rW9zMgT>w(wO$tzOP#4H0k5ap zp~dlqGtueOY>897T2A6LM|N}l&gS}4&Gmbm>+75Am!(wuhbQJD-y6sbtqL7zniV<| zIvZ+CsVyI#l$F}{+>W+G2W%ytfZOk_X~tu$w`@#v`8>t+U|S*{sU;Uqbb6gGx2w#L zM`Z+$;0o!IaxE^a!0qxoOSKG_yR6hxr4?r6qM%2ExYVN+W#oEl9Dyq5aHKUyW^>)v z=DO#b>vuQT9oPI?i#D?;v^;b;bTV`-bOQC-kJ=q;stYxSPJ>Idg{c=7>~CFnI<3-E z;_$mX?v`vvNlC!#D5)v-I!avuUqfkHC35_MQfK*iJLVt$?3EsO8H>(h7LdsALVwqK z+|CKqdCrlU9(Sq7o$Ga$xqN=Kt`rTXKF}I6eSs;Zu18!xJkV!WWTzDGd{f(*l#*rB zdbMbAwiI_+rNdpS6{g$e74*FhttdSK38zdrHGlkjQg8I%9ndRG%+Pu@qq7Q~KEIae z^j7%FT_`kjvN^N{jm2<;4uwuPEeIXgLd!yj(F-S<=As8?X$vziY&+X>YG3OU3p(l> z&TCd}bh~EKI>`%dt=14fE~a@7 z+{FPm+GFah?MzI~rBO{!^*Y=Y`aq`2M2S-?w9$nN?diW>a}+e!uWGK})m(oNL)QAl z{?_FSF&>(qN(FZ~w3%68*sJ6E zpdpF*bj#Gl@_^rgdk&bS6{aQz{J{pdR)mBjx4C|`Tn%++(Sr8H3#jf?Vv9$cnI2l! 
zRL|25Q?2FLqLyRFssdBhwOVc2AxPyDqvuU{=c`X}_^}2`F>xSm~1=Mt8 zRt0xU74+?==VG88ZjZaB$`kMvS9?7( zYD}HmG}9O8Cu?CptyjC2oXYcEiyQbThFB)NSjt5M=gL(nZN&yz0mL^joYA0 zw3(@)hS1K?QVdu7nZ-2AOiKI_GX?+!{>hgi0sR>>_(Y zrMxb4Q-v{)VCL)Rbj;e)QMWzHX6PE_sBunc1(~^1QKr82%u#(nh6pzI951Xud4!{ za{^NSaxFooORzR`6RLu{=(5yVsmV;IR&*~59}YOR(ts3}V!5_1ToXM#HNRd8cY(uW zO=;H~;MU}r%)&Bige!v`J4H$9>Es;2jWC7DpkZ>7)v;WVgO&KX_R_j-%^0k6ll-{I zyBJ@at=Dv9QaS2d=80dfHRvn6U+=A2uQn|?BdHu_udJl}R5FanVPl#4{Tnq+y5>WaZ(m_T?!uRMyFZD zQLRw^6p;jj4V$@_mZp@#oIV3fExq?~ZJ|{(@dPA;k@DS^kGtIUu$+L!`8!|8fBnh$ z+6=O!X%6ocmpgvBRcw#nVIpVS@9G|S_~_9z_8IA}N|#&9D|ce?Elh1)IuDDhEmG6o zpN>W0oHQQfoN|<|#zK|LDkGAD(Xpq>EHni6#%l7=0$$Zzw}qCmT)I&+)oOZ7o0%Kh zim*h6CTH>AG3lQ%Y!{XsXg#x__4za0x*9oR!^k%X*}4a3(Y4XK2{7`Nj{b%4%_(DU z9yJ=?*=owk^E#_xei7eA!VcJofi2q3fnRF927a6Duc>yXRiVOuhqp!_=)R4obi)BV zy5MTP*G@O>~%(m*}eT>mT?s6CorFMtqAF2V-ctk6M>*jYG2 zL>ugKR%Q&@3sus*z)oB{oB#ziqr5hT6pCb7qeIl$s+~Rur zg;SDZ)OWp%6`K|!PDE|tnXrmB?`&;c*|z>UUh>&VwB$!}M`hO{?qM&ST_P`~7p5!r z!r3LkU4|E~Lri*-RyKQKtEE;hlnq#|ha*PH-ntd>@`7*}((mAv=$nKBdR}@$zz3IH zNl&yJ2|Uwk`5HrG2(WJ8;ov(EmK<-cpG)Bto{p%8?+|RLlvX_#CgVWU?4~*juP_f~ zK%Cij<{;|OI(HWW03M7U7>sj?7d)=yvaxb7rdt`fh;i%R%$=?@;1m+1cGm@X%vx}D8+hcL*< znRzcJC4$M}p_O>pVa&}lG_ph7TIV4?>gFLcwYVYkwL-GtPKRN*i-zpe97BSdf5?a0 z%po1p0$oPN%ve^~$K*!#F&csqL?bX0121ZY2rY~tLi>+ zTR12)eq5^&2#tU6wL{}3U5Rc#H#z0k_R0mx=~Yg@mv>K_E#u@Mu#drp!&i(Ag0s@; zV|y@e^i88~!6>LcV-)T!K91Rxlt9hQ$iPg(ZjdK%dXkZn^QqGv*lmO*94277U{1lJ zAy~(?oW-q2H)#G0Du*$oh%z5Gx=|Xnra3f>#y(3f`lB240i3jWAlcs339d@N$1T^v zdW;5Eo*aA`5e})))=IA>JAy)G+ooY_N1eG`cEp1#hveozwA-q-Y_^PvueGW_=0~jQTZrbiX>sX=4s43tW%7B2*3NykqC5%;HAF#CSu&#EryDl8bPmck zVyP^Llf{lnS~I>cqqXz?C@+`r@$t_e`aMl9LdT5WIb*KB zJ-#-b_Y0ni8aQoh=ep>MSQ1c?4Bm`o*&3Cw_Q}15# z9d;cuMLRQG1^3AeM{4Vm#G0}V z2%uiq90+JoO&RA{#bYRj#cRzxZjyzZr3)FurF0kA`9X*i*% zz(2Tfd5KZRPocF-5742$ZQTlFX6;YIUz zAKUiM1BN4(_b!(AM%P?=5v$vZ0LyAs$7*Y(*`eht-N*RxMWf#+-0>9IG5a@f{@U{} zEnnEL$nu4`lHrRqSJqB_&@_1hAw%_%zidL&vUu;Ch7fqVvy8Si^3s8=f(;h|XPMmi z;_8v$&NA|W^^2vYGB!8Hw#M4!qVzS-(xgNL4Hv*iL2V0nx9*%B)o#3GkbiQy4b(4d zGxS|ewy#pIw=VBT4T19bcc`a5ljo-0=XD^~kF$1Nh@Qt!kxhz%dn(C2;h+l6d6)D} z^?NwRMHvjm)1jl}kjWk*0%GR3BNhgopT24~^2D-hj`6-4ZI- z^-;YO`%W6J60+ z>fOVqjFT1BI%hf0Y&sENYn-m#AHZg+9BsVpUcIV=E{0XDT_;GT5!}4^+QEzBmk+ua zJy2EOqe+Q+lM)@}IOmWyy4x*?-()hl3+W=dbO!gqjv!)ON+B*{U7UT&h!KS`TFcOa zC(wq&Fe```Q(KQ7!u}y#mGbeo>$O0I?A+n#ZnD|>bJ2)y2qD6={rcDLN9A%kU=a(@*LHcpB993W;n{M7C#V=a$tFzk*-f)~dfQvh&cWeM+v5=N)tY3y_K%v9yb4*9)U7oF1 zckretRr@(okn>>BpH*={iY;IK^1+*;8am3?tT)uohkiCY&O9m9PLWqIGpQtzJ`#W*QH_MI9%N^Ro<_XD5fQ>`d)u5P99~D&HD%iF#fM7lvaUmS@4mfh(O_)v z^c9J-Iwu-=#n`B-cDZV}Ue#1PoBLFk+s(7YhD#n7>__QJj!c?6FpNk!(9KCBx%US91;#$@}Gtm{?g``{oXakPM z)>7^1WV7yQRL@T!z2V_ikR#8#5s$*oU{5z&I{nP{wWp&NLpTyipB0`mcN$a1FpLvi zZeInOa%J?C8S%qeeH@bRr{)yo;~dLbQtpy4Vt@*q%O}-BY6wcS!knmBAPE|1qCLN) zJh?6yVd zlm+2_iJEL_pX;r(cDpQMPjkGEvVfdu-J@d))9-fzbNzGk&!sH$QcI24EN zlv!A0_sRgw8r3|M4>nXGo*b$bO*ZpAa)Y}l7L_MoF1jC`fjNh2j>!rg?(xjTe%Wx7 z^_#+t(fmb!y8mW*e|bEH$8V8m`cK0mBgE4dPHH(dzis^*`MVnLI2yw@iRzK@)FE() z*6w#2=PLP5_yl=sbX8{5-0d>I4E=$PnvAh*1r^e5$kTWqf<2$du;+9C0AZr!sE3~gi+!xXtMyaGtdibGLmEEL)>~H&H((QT2BO%K~M`@ros)>WM z_v=ke;a3E6n5EY%5hq=kKla2xC3>7{mZo3Jy?4*! 
zGV;*#$kyQv6T(>P`4}%lcL+A zem~|&@J|6tT5z^JTV$IQIToaik-c@>w$}BHe_F-6!!gS+BW5vz^NAQ~x12@S$lbEB zi|hL1eDrbxN0E3{GeuvOHol}PA4U)BIb=d{LIxL)$a_I-d?nSxCUhyL{r<@M%~5YnQX3HP~m)hqAOkw;K*)lxS zu?O?hmJ%m(;a8P&u?Ir|th8qk{K$>`@uoU-ZTpU8*v7Zd<2UVm$aL2%s*kLh?Q_1= zyOtk_O7&@ss+4KTN2nBDElWc^A!VCflmm^oPq;gA}wlJCcHL~q1;ODR-1z#A{-${q7{Df%3e(RlH%nP_OP;VGSXS4Nw= zJb5LTWM8{{Sxj;|vpz{HQdVE?=^mZAC@gQvLzQ&>!-sVFl9j#r18F$Wbymp^ zHa-r*VH+2K&DKlG32!J-bSb}Hf=!u48z8i12_5!sY_5OO7!SKFuUnTK9LLDF5r-o7 zCAIy;_O>*GBDYA!Dfx4Ilr`N4yvWU>rL%=c0>4;QT#g}pu;kGgEiR< z+f;+?2v1+ymn7rYQRToFLau2z2bR{W2G5=Z{^=w)I2&^wr@&MM^LTQ%_N`}HPCe6d z>}2~HydXM(b6|=G<$P$a=hs^>C#&f^YmzjOm>v3E()DNJSz%*H{^f!26qeq6-%9xz zPUEtcQ){9Rimlyka@yQv^lz5zpAvj1<8@VgD&;QodXqk{Wdt&O>>}MZ+BpJAwev|; zoXzgV9#ei>RS>m{z*#pJkX_GL6pWx*@>>H;7=5pdS#K1?A)?%8=q6DQ4%_GMm(C~3 zF3Md6f^yBMx#4z_yL`ujXO*W$~O-a=}RbH#qyB+5DxHZ z50xn$YQu)M1?$^aKN&UclgCq&f+G)jyTkY=znwdbZ!9q0jmVT18E+4@&|Z9W zfzRk*tm~vtoD`nQ7iO=-mqi`;s&3WUn0b8ZU9!V6sK4MJ`BewzfO@`e@SZdz$ zr0lT?WGm7Vyj4y%d^Wz6);YdS#&}-5qC{3y#Duro|5wC>QpQJLvKjbT#MsefV#Ud? zo#>;l@BmwT&Oo@|x2thnc^^Jve1xo7{2Ng#&Dv?KzS5HU-GeOc zW!A2f;%o2K&z+L+F(Ni@ctL>v>|d?dRQkFR75mYjcJ5ER-Hok)hVLg3DdCL5Dc{t{ zRH01K%P*P2OWVcBgL#lCdD4oC9&9~$1n)iVmDnHZIWs4O);BE*oxV{Eof#fFAP+Ow zooYR>F5H_Ka*gUMZNXJ~&1-Y@O3Mlqd!`oSA*l6$=$9V@-%3QSZ8+$|S9W%rYYIL9 zz(*Shkp`T{48Tv3{E@L7RyekbFjUx;;7n3F3*_J!Zv`f+{PY#akn%9HX;xIV61!jd zW1rU>Wwm-vrN2nf7JiznRA|rJ-%fDMz)4u?h!Rg##{>AR1_uVkc!gZ$@E2D(rg^-@ zk2t;ZtJVhdpL6leZa8;S#*~1ovb5M)?I|f&%u^d~xHzHI;de}N_?*S^%ajb8(__yW zJv<3tGiQ`KeI+pR^6T6$@D20iVVtSSOf6r2@S3daYSyWa3a6&$%RAnvp>~>chNB9v zU4U$kAvt#QU}MrY8F06H`Jx{l+h@4wEeo9_62U-;3_ty6XGJymt8PF3AY zWFvBHA~Jy8M0O(E$3&*29D-JY)`B)DQb%-&q}mED6jeV^g9L*G zHwbPN+$0zxxLGh%aEstp!EJ)u1$PMU6x=1aTX2uyUcr5WVS*$ zfm@I%$P#1=h6{28BLumE`_cVR6Xi)NUr-bGcvGQb}2qy_9BX1T_nWV}EenEhtbf|)83NV8x zD0v}4r64SrDwxJ#z3KAU6GSs5?+=1U1dj?H6Fkmfy(-abWKgxBM(~8-Nut>?g{MR~ zhiGn0IFG27Q#4<&K(J7-h{4ZWEb2>$(1nn&@EJxc^wYAKa5)QA{BSZSNp@h3L{DlJ~0MHO8eF1#hsrDy44yrr@pEJ>M3EcZlAN38O@3q~v>o zKMDRUcwg{=;6uSj46f$KM1P5;{z~+T6!}!}H<+t^@bB{Uvtn*F(dVex1)_$S`Z=OT z(Q6W%Z_dNR_<0uvUkJWrsEmJN4!KH%|04RTS&zOZ`X4;#XQFRn!f&PIcY^N)KL~yl z{9Evo;6DuYp#S1QHlkm|z+VNwF(6jOR4EOcK{mVOnF2>FuNB!^i?EI05<%NokxThz zw#&%YPVz1nToEgBr4(t;XqQj64lr;D**XH#$<_&YfNY(C6tZ0{>Rkj~A-tPx-GISl zyGDf93a%4$7xWM~1qp(lf?k5&f;CjJ8MqHcW(so0$&~~HL z?i0h<(byDGXeZEhzU42`b4JmLgLHvG`}zXa?D4O5Ps?k1+UVkCN@N zSn6@IRY{Rq0!%UL={15U1Wz(peKy&iilyd|ZLSoV7ZcWso%6}IK=KyG@)k*+#J?*> zaLTe;;-77)6j>&a_-9)osb>Yx304YL2_*j6)<|ltV4Yw+gB~W^1~F-)V3T08V2fa@ zV4GmOV25C*Kmwml+|>5G2=@pi{@D=wR736)Nc^)&{IeYt;R^zZf40MtIwCkKI3_qQ zI3YMGI3+kOcv0|@AR>5K@QUD7!E1uo1#bxcD0ovK@z3_Qq}~y{D?sd16_fa9drwjl z|7;TfZ10Qk1A)Xp+ee&IPNdZTBKRwIJVCO3BB@UWe-r#&a8~e{;B!HP;GCclcbURn zY-=Lhc@bU^ToimE_!1>2lIh04`ln7q`nd$_9+7q{}jFvd@J})@I3?W#6^A- z{2R9(K(?QNOUd>h-1;{$>1V-z$@Yts{FQMf6z~LtpkR_=lN6FlXlJySp=LNxqzxIZ zB(HUgyh}vbmT{FwQkOyD8Zz1e?a8=YgjWczWLQtX3Prk*(E;d0SD~6lM}{(@Gg4Pu zDV3*$h<^&*THNJYlvgby*S2f@pYA~LjRO=yu< zEDGLuAsbObMkxz9P$@vJC3C+eFewMwv>(OEj&SPO+ZGS*4nddYi2QX2#t8CUm{6mDkhB4Z1%os6v_ z+$KeiNlN0Mv6E9)ZB?i4CF6N1vWL+{`p=N~XY3Q9R!96ZB4iu{j+5~MaD3TMgqo8Gv!_vD_gzXufHcP4l6grc=qog`9bcfro7GW2LWpy_wJV5qqfIG>K_-DVK z?AHOk$=)4EAbStdb26M=CDl{Vi=oG#y$=);$=+A;WKddp{Y5x{p$DFQAQWyU`yk*( zvJaLbH!!p>*>4iz5QbA`2K!Jb+(!0WB=1&60@@61ZNFV`M+@OyP{<@Z;-5W*?Dqh} z$bPSsypLfuYmx|)8CJ)oLcvA$G|5Y6B%F{`hQQ6RYMTXxJhEp4Bgj5nisUe?2jzumT)W-W{`axFp2CB0!3sW4?IlvheV;U#a%oioWQW! ztQZO9kakA?tJl3LQm8j2RbbLcnHBgvM_9rCuB*VJ(QzD$h z=%w9e9u$_4y%tzV_W4p|0mB+c694RrS!lJOihs+<{){LrWmuJ7F2WTI>n@0Y<{GlE z1Te-{LH%!JUkw~1`x;;u+1CP_$-WNQK=$==mpXQD=XBO*M? 
[binary payload omitted]

diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb
index 837b725e9c1546824ad6ffe8455f643f84c889b0..16c1acf8002606fa09aba8bed259efb2c5cdca8e 100644
GIT binary patch
delta 16
YcmZ4MyViHZXH}N8h1x$Re^=cG07R<@Pyhe`

delta 16
YcmZ4MyViHZXH}NeIbtc3zpHKo06`uHp#T5?

diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb
index d16b0eee4c5e5b8a80d34997d6dcd1db3af44c7f..a4277d0a55c47009b6e090f433fdc9e6c1c327fd 100644
GIT binary patch
delta 21
dcmX?disi^DmWC~iGgMg87Ha>PK1YRd1psUO2}A$@

delta 21
dcmX?disi^DmWC~iGgMeo=ZK|DpQFOK0sv>g2$cW;

diff --git a/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb
index 0809201619b5915fa6189200e4c53e6088f437da..393efe464b6103ddc505efaece1aa0609a3efee0 100644
GIT binary patch
literal 21117
[binary payload omitted]

literal 20809
[binary payload omitted]

[start of the next patch is garbled; its "From" and author lines are unrecoverable]
Date: Thu, 15 Aug 2024 18:43:06 +0200
Subject: [PATCH 056/389] Adds a warning about manually mounting snapshots
 managed by ILM (#111883)

* Adds a warning about manually mounting snapshots managed by ILM

* Shortens text and moves the warning to Searchable snapshots chapter
---
 .../searchable-snapshots/apis/mount-snapshot.asciidoc |  2 ++
 docs/reference/searchable-snapshots/index.asciidoc    | 11 +++++++++++
 2 files changed, 13 insertions(+)

diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
index ba25cebcd1e1a..5d838eb86dcf3 100644
--- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
+++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
@@ -23,6 +23,8 @@ For more information, see <>.
 [[searchable-snapshots-api-mount-desc]]
 ==== {api-description-title}
 
+This API mounts a snapshot as a searchable snapshot index.
+Note that manually mounting {ilm-init}-managed snapshots can <> with <>.
 
 [[searchable-snapshots-api-mount-path-params]]
 ==== {api-path-parms-title}

diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc
index 12b6f2477a93c..8e4a1b93b9c05 100644
--- a/docs/reference/searchable-snapshots/index.asciidoc
+++ b/docs/reference/searchable-snapshots/index.asciidoc
@@ -170,6 +170,17 @@ do not have a dedicated frozen tier, you must configure the cache on one or more
 nodes. Partially mounted indices are only allocated to nodes that have a
 shared cache.
+[[manually-mounting-snapshots]] +[WARNING] +.Manual snapshot mounting +==== +Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can +interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss +or complications with snapshot handling. For optimal results, allow {ilm-init} to manage +snapshots automatically. If manual mounting is necessary, be aware of its potential +impact on {ilm-init} processes. For more information, learn about <>. +==== + [[searchable-snapshots-shared-cache]] `xpack.searchable.snapshot.shared_cache.size`:: (<>) From 0ca60c6c76301085455187f0700a29aa38094e6b Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Thu, 15 Aug 2024 11:53:34 -0600 Subject: [PATCH 057/389] Update OAuth2 OIDC SDK (#108799) This commit updates the Nimbus OAuth2 OIDC SDK and the associated Nimbus JOSE+JWT, however a few odd choices had to be made in the process. First, we update to versions which are old at time of merge. This is because versions of Nimbus JOSE+JWT 9.38 and after through time of writing [contain a bug](https://bitbucket.org/connect2id/nimbus-jose-jwt/issues/550/java-module-doesnt-work-properly-with) in which the shaded gson class files included in the library are not properly loaded by our module loading code (and possibly in general? the root cause of the bug is unclear at time of writing but it does not appear to be present in all uses of this library). This requires us to use an older version of Nimbus OAuth2 OIDC SDK as well. Second, the aforementioned shaded gson uses reflection internally, and is used in unpredictable places in these libraries (e.g. constructors). This is extremely unfriendly to our usage of the security manager. In order to make the scope of permission grants as narrow as possible, we shadow nimbus-jose-jwt in order to insert `AccessController.doPrivileged` calls at the appropriate points, given the usage of gson is relatively contained. This approach was chosen over other approaches given 1) the relative simplicity given the implementation of the library, and 2) the complexity involved in safely using the library any other way - as one example, gson is used frequently in `toString()` methods, which are frequently called implicitly, especially in combination with logging which may mask security manager exceptions from being surfaced in tests. All of the code we intercept should be re-evaluated when this library is next upgraded. 
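
As a minimal sketch of the interception pattern (it mirrors the wrapper
classes added in the diff below; the relocated package name
org.elasticsearch.nimbus.jose.util comes from the shadow build under
x-pack/plugin/security/lib), each delegating method looks roughly like
this:

    // Requires java.security.{AccessController, PrivilegedExceptionAction,
    // PrivilegedActionException}, java.text.ParseException, java.util.Map.
    // Delegate to the relocated copy of the original class inside
    // doPrivileged, so gson's reflective JSON handling runs with the
    // security plugin's permissions rather than the caller's.
    public static Map<String, Object> parse(final String s) throws ParseException {
        try {
            return AccessController.doPrivileged(
                (PrivilegedExceptionAction<Map<String, Object>>) () ->
                    org.elasticsearch.nimbus.jose.util.JSONObjectUtils.parse(s)
            );
        } catch (PrivilegedActionException e) {
            // doPrivileged wraps checked exceptions; rethrow the original
            throw (ParseException) e.getException();
        }
    }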
Co-authored-by: Jake Landis --- gradle/verification-metadata.xml | 18 +- x-pack/plugin/security/build.gradle | 255 +++++++++--------- x-pack/plugin/security/lib/build.gradle | 13 + .../build.gradle | 29 ++ .../licenses/nimbus-jose-jwt-LICENSE.txt | 0 .../licenses/nimbus-jose-jwt-NOTICE.txt | 0 .../build.gradle | 26 ++ .../lib/nimbus-jose-jwt-modified/build.gradle | 30 +++ .../licenses/nimbus-jose-jwt-LICENSE.txt | 202 ++++++++++++++ .../licenses/nimbus-jose-jwt-NOTICE.txt | 14 + .../nimbusds/jose/util/JSONObjectUtils.java | 208 ++++++++++++++ .../nimbusds/jose/util/JSONStringUtils.java | 26 ++ .../xpack/security/authc/jwt/JwtUtil.java | 14 +- .../plugin-metadata/plugin-security.policy | 6 + .../authc/jwt/JwtSignatureValidatorTests.java | 2 +- .../authc/oidc/OpenIdConnectTestCase.java | 7 +- 16 files changed, 711 insertions(+), 139 deletions(-) create mode 100644 x-pack/plugin/security/lib/build.gradle create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle rename x-pack/plugin/security/{ => lib/nimbus-jose-jwt-modified-part1}/licenses/nimbus-jose-jwt-LICENSE.txt (100%) rename x-pack/plugin/security/{ => lib/nimbus-jose-jwt-modified-part1}/licenses/nimbus-jose-jwt-NOTICE.txt (100%) create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-LICENSE.txt create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-NOTICE.txt create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONObjectUtils.java create mode 100644 x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONStringUtils.java diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index f6f9878ea20c7..00f1caec24cf7 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -946,9 +946,11 @@ - - - + + + + + @@ -961,6 +963,11 @@ + + + + + @@ -1739,6 +1746,11 @@ + + + + + diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 07308d5d29a9a..d3697eade8b24 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -79,12 +79,19 @@ dependencies { runtimeOnly "joda-time:joda-time:2.10.10" // Dependencies for oidc - api "com.nimbusds:oauth2-oidc-sdk:9.37" - api "com.nimbusds:nimbus-jose-jwt:9.23" + api "com.nimbusds:oauth2-oidc-sdk:11.10.1" + api project(path: xpackModule('security:lib:nimbus-jose-jwt-modified'), configuration: 'shadow') + if (isEclipse) { + /* + * Eclipse can't pick up the shadow dependency so we point it at the unmodified version of the library + * so it can compile things. 
+ */ + api "com.nimbusds:nimbus-jose-jwt:9.37.3" + } api "com.nimbusds:lang-tag:1.4.4" api "com.sun.mail:jakarta.mail:1.6.3" api "net.jcip:jcip-annotations:1.0" - api "net.minidev:json-smart:2.4.10" + api "net.minidev:json-smart:2.5.1" api "net.minidev:accessors-smart:2.4.2" api "org.ow2.asm:asm:8.0.1" @@ -103,7 +110,6 @@ dependencies { testImplementation('org.apache.kerby:kerb-crypto:1.1.1') testImplementation('org.apache.kerby:kerb-util:1.1.1') testImplementation('org.apache.kerby:token-provider:1.1.1') - testImplementation('com.nimbusds:nimbus-jose-jwt:9.23') testImplementation('net.jcip:jcip-annotations:1.0') testImplementation('org.apache.kerby:kerb-admin:1.1.1') testImplementation('org.apache.kerby:kerb-server:1.1.1') @@ -225,6 +231,9 @@ tasks.named("thirdPartyAudit").configure { 'javax.servlet.http.HttpSession', 'javax.servlet.http.HttpUpgradeHandler', 'javax.servlet.http.Part', + 'jakarta.servlet.ServletRequest', + 'jakarta.servlet.http.HttpServletRequest', + 'jakarta.servlet.http.HttpServletResponse', // [missing classes] Shibboleth + OpenSAML have velocity support that we don't use 'org.apache.velocity.VelocityContext', 'org.apache.velocity.app.VelocityEngine', @@ -274,112 +283,103 @@ tasks.named("thirdPartyAudit").configure { // [missing classes] Http Client cache has optional ehcache support 'net.sf.ehcache.Ehcache', 'net.sf.ehcache.Element', - // Bouncycastle is an optional dependency for apache directory, cryptacular and opensaml packages. We - // acknowledge them here instead of adding bouncy castle as a compileOnly dependency - 'org.bouncycastle.asn1.ASN1Encodable', - 'org.bouncycastle.asn1.ASN1InputStream', - 'org.bouncycastle.asn1.ASN1Integer', - 'org.bouncycastle.asn1.ASN1ObjectIdentifier', - 'org.bouncycastle.asn1.ASN1OctetString', - 'org.bouncycastle.asn1.ASN1Primitive', - 'org.bouncycastle.asn1.ASN1Sequence', - 'org.bouncycastle.asn1.ASN1TaggedObject', - // 'org.bouncycastle.asn1.DEROctetString', - 'org.bouncycastle.asn1.pkcs.EncryptedPrivateKeyInfo', - 'org.bouncycastle.asn1.pkcs.EncryptionScheme', - 'org.bouncycastle.asn1.pkcs.KeyDerivationFunc', - 'org.bouncycastle.asn1.pkcs.PBEParameter', - 'org.bouncycastle.asn1.pkcs.PBES2Parameters', - 'org.bouncycastle.asn1.pkcs.PBKDF2Params', - 'org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers', - 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', - 'org.bouncycastle.asn1.x500.AttributeTypeAndValue', - 'org.bouncycastle.asn1.x500.RDN', - 'org.bouncycastle.asn1.x500.X500Name', - 'org.bouncycastle.asn1.x509.AccessDescription', - 'org.bouncycastle.asn1.x509.AlgorithmIdentifier', - 'org.bouncycastle.asn1.x509.AuthorityKeyIdentifier', - 'org.bouncycastle.asn1.x509.BasicConstraints', - 'org.bouncycastle.asn1.x509.DistributionPoint', - 'org.bouncycastle.asn1.x509.Extension', - 'org.bouncycastle.asn1.x509.GeneralName', - 'org.bouncycastle.asn1.x509.GeneralNames', - 'org.bouncycastle.asn1.x509.GeneralNamesBuilder', - 'org.bouncycastle.asn1.x509.KeyPurposeId', - 'org.bouncycastle.asn1.x509.KeyUsage', - 'org.bouncycastle.asn1.x509.PolicyInformation', - 'org.bouncycastle.asn1.x509.SubjectKeyIdentifier', - 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', - // 'org.bouncycastle.asn1.x9.DomainParameters', - // 'org.bouncycastle.asn1.x9.ECNamedCurveTable', - 'org.bouncycastle.asn1.x9.X9ECParameters', - 'org.bouncycastle.cert.X509v3CertificateBuilder', - 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', - 'org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils', - 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', - 
'org.bouncycastle.crypto.BlockCipher', - 'org.bouncycastle.crypto.BufferedBlockCipher', - 'org.bouncycastle.crypto.CipherParameters', - 'org.bouncycastle.crypto.Digest', - 'org.bouncycastle.crypto.PBEParametersGenerator', - 'org.bouncycastle.crypto.StreamCipher', - 'org.bouncycastle.crypto.agreement.kdf.ConcatenationKDFGenerator', - // 'org.bouncycastle.crypto.ec.CustomNamedCurves', - 'org.bouncycastle.crypto.engines.AESEngine', - 'org.bouncycastle.crypto.generators.BCrypt', - 'org.bouncycastle.crypto.generators.OpenSSLPBEParametersGenerator', - 'org.bouncycastle.crypto.generators.PKCS5S1ParametersGenerator', - 'org.bouncycastle.crypto.generators.PKCS5S2ParametersGenerator', - 'org.bouncycastle.crypto.macs.HMac', - 'org.bouncycastle.crypto.modes.AEADBlockCipher', - 'org.bouncycastle.crypto.modes.GCMBlockCipher', - 'org.bouncycastle.crypto.paddings.BlockCipherPadding', - 'org.bouncycastle.crypto.paddings.PaddedBufferedBlockCipher', - 'org.bouncycastle.crypto.params.AsymmetricKeyParameter', - 'org.bouncycastle.crypto.params.DSAKeyParameters', - 'org.bouncycastle.crypto.params.DSAParameters', - 'org.bouncycastle.crypto.params.DSAPrivateKeyParameters', - 'org.bouncycastle.crypto.params.DSAPublicKeyParameters', - 'org.bouncycastle.crypto.params.ECDomainParameters', - 'org.bouncycastle.crypto.params.ECKeyParameters', - 'org.bouncycastle.crypto.params.ECPrivateKeyParameters', - 'org.bouncycastle.crypto.params.ECPublicKeyParameters', - // 'org.bouncycastle.crypto.params.KDFParameters', - 'org.bouncycastle.crypto.params.KeyParameter', - 'org.bouncycastle.crypto.params.RSAKeyParameters', - 'org.bouncycastle.crypto.params.RSAPrivateCrtKeyParameters', - 'org.bouncycastle.crypto.prng.EntropySource', - 'org.bouncycastle.crypto.prng.SP800SecureRandom', - 'org.bouncycastle.crypto.prng.SP800SecureRandomBuilder', - 'org.bouncycastle.crypto.prng.drbg.SP80090DRBG', - 'org.bouncycastle.crypto.signers.DSASigner', - 'org.bouncycastle.crypto.signers.ECDSASigner', - 'org.bouncycastle.crypto.signers.RSADigestSigner', - 'org.bouncycastle.crypto.util.PrivateKeyFactory', - 'org.bouncycastle.crypto.util.PrivateKeyInfoFactory', - 'org.bouncycastle.crypto.util.PublicKeyFactory', - 'org.bouncycastle.crypto.util.SubjectPublicKeyInfoFactory', - 'org.bouncycastle.jcajce.provider.asymmetric.dsa.KeyPairGeneratorSpi', - 'org.bouncycastle.jcajce.provider.asymmetric.ec.KeyPairGeneratorSpi$EC', - 'org.bouncycastle.jcajce.provider.asymmetric.rsa.KeyPairGeneratorSpi', - 'org.bouncycastle.jcajce.provider.asymmetric.util.EC5Util', - 'org.bouncycastle.jcajce.provider.asymmetric.util.ECUtil', - // 'org.bouncycastle.jce.ECNamedCurveTable', - // 'org.bouncycastle.jce.spec.ECNamedCurveParameterSpec', - 'org.bouncycastle.math.ec.ECFieldElement', - 'org.bouncycastle.math.ec.ECPoint', - 'org.bouncycastle.openssl.jcajce.JcaPEMWriter', - 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', - 'org.bouncycastle.util.Arrays', - 'org.bouncycastle.util.io.Streams', - 'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder', - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider', - 'org.bouncycastle.cert.X509CertificateHolder', - 'org.bouncycastle.openssl.PEMKeyPair', - 'org.bouncycastle.openssl.PEMParser', - 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', - 'org.bouncycastle.crypto.InvalidCipherTextException', - 'org.bouncycastle.jce.provider.BouncyCastleProvider', + // Bouncycastle is an optional dependency for apache directory, cryptacular and opensaml packages. 
We + // acknowledge them here instead of adding bouncy castle as a compileOnly dependency + 'org.bouncycastle.asn1.ASN1Encodable', + 'org.bouncycastle.asn1.ASN1InputStream', + 'org.bouncycastle.asn1.ASN1Integer', + 'org.bouncycastle.asn1.ASN1ObjectIdentifier', + 'org.bouncycastle.asn1.ASN1OctetString', + 'org.bouncycastle.asn1.ASN1Primitive', + 'org.bouncycastle.asn1.ASN1Sequence', + 'org.bouncycastle.asn1.ASN1TaggedObject', + // 'org.bouncycastle.asn1.DEROctetString', + 'org.bouncycastle.asn1.pkcs.EncryptedPrivateKeyInfo', + 'org.bouncycastle.asn1.pkcs.EncryptionScheme', + 'org.bouncycastle.asn1.pkcs.KeyDerivationFunc', + 'org.bouncycastle.asn1.pkcs.PBEParameter', + 'org.bouncycastle.asn1.pkcs.PBES2Parameters', + 'org.bouncycastle.asn1.pkcs.PBKDF2Params', + 'org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers', + 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', + 'org.bouncycastle.asn1.x500.AttributeTypeAndValue', + 'org.bouncycastle.asn1.x500.RDN', + 'org.bouncycastle.asn1.x500.X500Name', + 'org.bouncycastle.asn1.x509.AccessDescription', + 'org.bouncycastle.asn1.x509.AlgorithmIdentifier', + 'org.bouncycastle.asn1.x509.AuthorityKeyIdentifier', + 'org.bouncycastle.asn1.x509.BasicConstraints', + 'org.bouncycastle.asn1.x509.DistributionPoint', + 'org.bouncycastle.asn1.x509.Extension', + 'org.bouncycastle.asn1.x509.GeneralName', + 'org.bouncycastle.asn1.x509.GeneralNames', + 'org.bouncycastle.asn1.x509.GeneralNamesBuilder', + 'org.bouncycastle.asn1.x509.KeyPurposeId', + 'org.bouncycastle.asn1.x509.KeyUsage', + 'org.bouncycastle.asn1.x509.PolicyInformation', + 'org.bouncycastle.asn1.x509.SubjectKeyIdentifier', + 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', + // 'org.bouncycastle.asn1.x9.DomainParameters', + // 'org.bouncycastle.asn1.x9.ECNamedCurveTable', + 'org.bouncycastle.asn1.x9.X9ECParameters', + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils', + 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', + 'org.bouncycastle.crypto.BlockCipher', + 'org.bouncycastle.crypto.BufferedBlockCipher', + 'org.bouncycastle.crypto.CipherParameters', + 'org.bouncycastle.crypto.Digest', + 'org.bouncycastle.crypto.PBEParametersGenerator', + 'org.bouncycastle.crypto.StreamCipher', + 'org.bouncycastle.crypto.agreement.kdf.ConcatenationKDFGenerator', + // 'org.bouncycastle.crypto.ec.CustomNamedCurves', + 'org.bouncycastle.crypto.generators.BCrypt', + 'org.bouncycastle.crypto.generators.OpenSSLPBEParametersGenerator', + 'org.bouncycastle.crypto.generators.PKCS5S1ParametersGenerator', + 'org.bouncycastle.crypto.generators.PKCS5S2ParametersGenerator', + 'org.bouncycastle.crypto.macs.HMac', + 'org.bouncycastle.crypto.modes.AEADBlockCipher', + 'org.bouncycastle.crypto.paddings.BlockCipherPadding', + 'org.bouncycastle.crypto.paddings.PaddedBufferedBlockCipher', + 'org.bouncycastle.crypto.params.AsymmetricKeyParameter', + 'org.bouncycastle.crypto.params.DSAKeyParameters', + 'org.bouncycastle.crypto.params.DSAParameters', + 'org.bouncycastle.crypto.params.DSAPrivateKeyParameters', + 'org.bouncycastle.crypto.params.DSAPublicKeyParameters', + 'org.bouncycastle.crypto.params.ECDomainParameters', + 'org.bouncycastle.crypto.params.ECKeyParameters', + 'org.bouncycastle.crypto.params.ECPrivateKeyParameters', + 'org.bouncycastle.crypto.params.ECPublicKeyParameters', + // 'org.bouncycastle.crypto.params.KDFParameters', + 'org.bouncycastle.crypto.params.KeyParameter', + 
'org.bouncycastle.crypto.params.RSAKeyParameters', + 'org.bouncycastle.crypto.params.RSAPrivateCrtKeyParameters', + 'org.bouncycastle.crypto.prng.EntropySource', + 'org.bouncycastle.crypto.prng.SP800SecureRandom', + 'org.bouncycastle.crypto.prng.SP800SecureRandomBuilder', + 'org.bouncycastle.crypto.prng.drbg.SP80090DRBG', + 'org.bouncycastle.crypto.signers.DSASigner', + 'org.bouncycastle.crypto.signers.ECDSASigner', + 'org.bouncycastle.crypto.signers.RSADigestSigner', + 'org.bouncycastle.crypto.util.PrivateKeyFactory', + 'org.bouncycastle.crypto.util.PrivateKeyInfoFactory', + 'org.bouncycastle.crypto.util.PublicKeyFactory', + 'org.bouncycastle.crypto.util.SubjectPublicKeyInfoFactory', + 'org.bouncycastle.jcajce.provider.asymmetric.dsa.KeyPairGeneratorSpi', + 'org.bouncycastle.jcajce.provider.asymmetric.ec.KeyPairGeneratorSpi$EC', + 'org.bouncycastle.jcajce.provider.asymmetric.rsa.KeyPairGeneratorSpi', + 'org.bouncycastle.jcajce.provider.asymmetric.util.EC5Util', + 'org.bouncycastle.jcajce.provider.asymmetric.util.ECUtil', + // 'org.bouncycastle.jce.ECNamedCurveTable', + // 'org.bouncycastle.jce.spec.ECNamedCurveParameterSpec', + 'org.bouncycastle.math.ec.ECFieldElement', + 'org.bouncycastle.math.ec.ECPoint', + 'org.bouncycastle.openssl.jcajce.JcaPEMWriter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.util.Arrays', + 'org.bouncycastle.util.io.Streams', + 'org.bouncycastle.cert.X509CertificateHolder', ) ignoreViolations( @@ -402,26 +402,21 @@ tasks.named("thirdPartyAudit").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( - 'javax.xml.bind.JAXBContext', - 'javax.xml.bind.JAXBElement', - 'javax.xml.bind.JAXBException', - 'javax.xml.bind.Unmarshaller', - 'javax.xml.bind.UnmarshallerHandler', - // Optional dependency of oauth2-oidc-sdk that we don't need since we do not support AES-SIV for JWE - 'org.cryptomator.siv.SivMode', - // Optional dependency of nimbus-jose-jwt for handling Ed25519 signatures and ECDH with X25519 (RFC 8037) - 'com.google.crypto.tink.subtle.Ed25519Sign', - 'com.google.crypto.tink.subtle.Ed25519Sign$KeyPair', - 'com.google.crypto.tink.subtle.Ed25519Verify', - 'com.google.crypto.tink.subtle.X25519', - 'com.google.crypto.tink.subtle.XChaCha20Poly1305', - 'com.nimbusds.common.contenttype.ContentType', - 'javax.activation.ActivationDataFlavor', - 'javax.activation.DataContentHandler', - 'javax.activation.DataHandler', - 'javax.activation.DataSource', - 'javax.activation.FileDataSource', - 'javax.activation.FileTypeMap' + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.JAXBException', + 'javax.xml.bind.Unmarshaller', + 'javax.xml.bind.UnmarshallerHandler', + // Optional dependency of oauth2-oidc-sdk that we don't need since we do not support AES-SIV for JWE + 'org.cryptomator.siv.SivMode', + 'com.nimbusds.common.contenttype.ContentType', + 'com.nimbusds.common.contenttype.ContentType$Parameter', + 'javax.activation.ActivationDataFlavor', + 'javax.activation.DataContentHandler', + 'javax.activation.DataHandler', + 'javax.activation.DataSource', + 'javax.activation.FileDataSource', + 'javax.activation.FileTypeMap' ) } diff --git a/x-pack/plugin/security/lib/build.gradle b/x-pack/plugin/security/lib/build.gradle new file mode 100644 index 0000000000000..7bc94f348e781 --- /dev/null +++ b/x-pack/plugin/security/lib/build.gradle @@ -0,0 +1,13 @@ +// This build deserves an explanation. 
Nimbus-jose-jwt uses gson internally, which is unfriendly +// to our usage of the security manager, to a degree that it makes the library extremely difficult +// to work with safely. The purpose of this build is to create a version of nimbus-jose-jwt with +// a couple classes replaced with wrappers which work with the security manager, the source files +// in this directory. + +// Because we want to include the original class files so that we can reference them without +// modification, there are a couple intermediate steps: +// nimbus-jose-jwt-modified-part1: Create a version of the JAR in which the relevant class files are moved to a different package. +// This is not immediately usable as this process rewrites the rest of the JAR to "correctly" reference the new classes. So, we need to... +// nimbus-jose-jwt-modified-part2: Create a JAR from the result of part 1 which contains *only* the relevant class files by removing everything else. +// nimbus-jose-jwt-modified: Use the result of part 2 here, combined with the original library, so that we can use our +// replacement classes which wrap the original class files. diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle new file mode 100644 index 0000000000000..f751fcd0a655d --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'com.github.johnrengelman.shadow' + +// See the build.gradle file in the parent directory for an explanation of this unusual build + +dependencies { + implementation "com.nimbusds:nimbus-jose-jwt:9.37.3" +} + +tasks.named('shadowJar').configure { + // Attempting to exclude all of the classes we *don't* move here ought to be possible per the + // shadowJar docs, but actually attempting to do so results in an empty JAR. May be a bug in the shadowJar plugin. 
+ relocate 'com.nimbusds.jose.util.JSONObjectUtils', 'org.elasticsearch.nimbus.jose.util.JSONObjectUtils' + relocate 'com.nimbusds.jose.util.JSONStringUtils', 'org.elasticsearch.nimbus.jose.util.JSONStringUtils' +} + +['jarHell', 'thirdPartyAudit', 'forbiddenApisMain', 'splitPackagesAudit', 'licenseHeaders'].each { + tasks.named(it).configure { + enabled = false + } +} + diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/licenses/nimbus-jose-jwt-LICENSE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt rename to x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/licenses/nimbus-jose-jwt-LICENSE.txt diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/licenses/nimbus-jose-jwt-NOTICE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt rename to x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/licenses/nimbus-jose-jwt-NOTICE.txt diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle new file mode 100644 index 0000000000000..c4c0f2ebd2fe1 --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'com.github.johnrengelman.shadow' + +// See the build.gradle file in the parent directory for an explanation of this unusual build + +dependencies { + implementation project(path: xpackModule('security:lib:nimbus-jose-jwt-modified-part1'), configuration: 'shadow') +} + +tasks.named('shadowJar').configure { + // Drop everything in the original namespace, as the classes we want to modify have already been moved to another package by part 1 + exclude 'com/nimbusds/' +} + +['jarHell', 'thirdPartyAudit', 'forbiddenApisMain', 'splitPackagesAudit', 'licenseHeaders'].each { + tasks.named(it).configure { + enabled = false + } +} diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle new file mode 100644 index 0000000000000..3438c067d8ab5 --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'com.github.johnrengelman.shadow' + +// See the build.gradle file in the parent directory for an explanation of this unusual build + +dependencies { + implementation "com.nimbusds:nimbus-jose-jwt:9.37.3" + implementation project(path: xpackModule('security:lib:nimbus-jose-jwt-modified-part2'), configuration: 'shadow') +} + +tasks.named('shadowJar').configure { + manifest { + // The original library uses this and it gets stripped by shadowJar + attributes 'Automatic-Module-Name': 'com.nimbusds.jose.jwt' + } +} + +['jarHell', 'thirdPartyAudit', 'forbiddenApisMain', 'splitPackagesAudit', 'licenseHeaders'].each { + tasks.named(it).configure { + enabled = false + } +} diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-NOTICE.txt new file mode 100644 index 0000000000000..cb9ad94f662a6 --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/licenses/nimbus-jose-jwt-NOTICE.txt @@ -0,0 +1,14 @@ +Nimbus JOSE + JWT + +Copyright 2012 - 2018, Connect2id Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONObjectUtils.java b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONObjectUtils.java
new file mode 100644
index 0000000000000..1ea11f5c280ef
--- /dev/null
+++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONObjectUtils.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package com.nimbusds.jose.util;
+
+import java.net.URI;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.text.ParseException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class wraps {@link org.elasticsearch.nimbus.jose.util.JSONObjectUtils}, which is copied directly from the source
+ * library, and delegates to that class as quickly as possible. This layer is only here to provide a point at which we
+ * can insert {@link java.security.AccessController#doPrivileged(PrivilegedAction)} calls as necessary. We don't do
+ * anything here other than ensure gson has the proper security manager permissions.
+ */
+public class JSONObjectUtils {
+
+    public static Map<String, Object> parse(final String s) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Map<String, Object>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.parse(s)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static Map<String, Object> parse(final String s, final int sizeLimit) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Map<String, Object>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.parse(
+                    s,
+                    sizeLimit
+                )
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    @Deprecated
+    public static Map<String, Object> parseJSONObject(final String s) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Map<String, Object>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.parseJSONObject(s)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static boolean getBoolean(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Boolean>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getBoolean(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static int getInt(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Integer>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getInt(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static long getLong(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Long>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getLong(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static float getFloat(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Float>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getFloat(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static double getDouble(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Double>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getDouble(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static String getString(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<String>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getString(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static URI getURI(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<URI>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getURI(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static List<Object> getJSONArray(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<List<Object>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getJSONArray(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static String[] getStringArray(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<String[]>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getStringArray(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static Map<String, Object>[] getJSONObjectArray(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Map<String, Object>[]>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils
+                    .getJSONObjectArray(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static List<String> getStringList(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<List<String>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getStringList(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static Map<String, Object> getJSONObject(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Map<String, Object>>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getJSONObject(
+                    o,
+                    key
+                )
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static Base64URL getBase64URL(final Map<String, Object> o, final String key) throws ParseException {
+        try {
+            return AccessController.doPrivileged(
+                (PrivilegedExceptionAction<Base64URL>) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.getBase64URL(o, key)
+            );
+        } catch (PrivilegedActionException e) {
+            throw (ParseException) e.getException();
+        }
+    }
+
+    public static String toJSONString(final Map<String, Object> o) {
+        return AccessController.doPrivileged(
(PrivilegedAction) () -> org.elasticsearch.nimbus.jose.util.JSONObjectUtils.toJSONString(o) + ); + } + + public static Map newJSONObject() { + return AccessController.doPrivileged( + (PrivilegedAction>) org.elasticsearch.nimbus.jose.util.JSONObjectUtils::newJSONObject + ); + } + + private JSONObjectUtils() {} +} diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONStringUtils.java b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONStringUtils.java new file mode 100644 index 0000000000000..e9e34d21ce7d6 --- /dev/null +++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/src/main/java/com/nimbusds/jose/util/JSONStringUtils.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package com.nimbusds.jose.util; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * This class wraps {@link JSONStringUtils}, which is copied directly from the source library, and delegates to + * that class as quickly as possible. This layer is only here to provide a point at which we can insert + * {@link java.security.AccessController#doPrivileged(PrivilegedAction)} calls as necessary. We don't do anything here + * other than ensure gson has the proper security manager permissions. + */ +public class JSONStringUtils { + + public static String toJSONString(final String string) { + return AccessController.doPrivileged((PrivilegedAction) () -> JSONStringUtils.toJSONString(string)); + } + + private JSONStringUtils() {} +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java index 928ecd7fa265d..b345178e205c3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java @@ -11,7 +11,6 @@ import com.nimbusds.jose.jwk.JWK; import com.nimbusds.jose.jwk.JWKSet; import com.nimbusds.jose.util.Base64URL; -import com.nimbusds.jose.util.JSONObjectUtils; import com.nimbusds.jwt.JWT; import com.nimbusds.jwt.SignedJWT; @@ -33,6 +32,7 @@ import org.apache.http.nio.reactor.ConnectingIOReactor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.SpecialPermission; import org.elasticsearch.action.ActionListener; @@ -45,11 +45,14 @@ import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; +import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.nio.charset.StandardCharsets; @@ -64,6 +67,7 @@ import java.util.ArrayList; import java.util.Arrays; 
import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.function.Supplier; @@ -237,7 +241,13 @@ public static String serializeJwkSet(final JWKSet jwkSet, final boolean publicKe if (jwkSet == null) { return null; } - return JSONObjectUtils.toJSONString(jwkSet.toJSONObject(publicKeysOnly)); + Map jwkJson = jwkSet.toJSONObject(publicKeysOnly); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.map(jwkJson); + return Strings.toString(builder); + } catch (IOException e) { + throw new ElasticsearchException(e); + } } public static String serializeJwkHmacOidc(final JWK key) { diff --git a/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy index 2c9d38e5ae55e..b3d5e80e09dcd 100644 --- a/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy +++ b/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy @@ -51,3 +51,9 @@ grant codeBase "${codebase.netty-transport}" { // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; }; + +grant codeBase "${codebase.nimbus-jose-jwt-modified}" { + // for JSON serialization based on a shaded GSON dependency + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; +}; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidatorTests.java index 3732573b2f03d..f1927876eba5f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidatorTests.java @@ -266,7 +266,7 @@ public void testJwtSignVerifyPassedForAllSupportedAlgorithms() { try { helpTestSignatureAlgorithm(signatureAlgorithm, false); } catch (Exception e) { - fail("signature validation with algorithm [" + signatureAlgorithm + "] should have succeeded"); + throw new RuntimeException("signature validation with algorithm [" + signatureAlgorithm + "] should have succeeded", e); } } // Fail: "ES256K" diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java index be45394b01ec6..a95ecd88f6a8e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.security.authc.oidc; +import net.minidev.json.JSONStyle; +import net.minidev.json.JSONValue; +import net.minidev.json.reader.JsonWriterI; + import com.nimbusds.jose.JWSAlgorithm; import com.nimbusds.jose.JWSHeader; import com.nimbusds.jose.crypto.RSASSASigner; -import com.nimbusds.jose.shaded.json.JSONStyle; -import com.nimbusds.jose.shaded.json.JSONValue; -import com.nimbusds.jose.shaded.json.reader.JsonWriterI; import com.nimbusds.jwt.JWT; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; From 0cf9c54f6583a82de05c4543b123c6c1f190092f Mon Sep 17 
00:00:00 2001 From: Ryan Ernst Date: Thu, 15 Aug 2024 12:00:41 -0700 Subject: [PATCH 058/389] Fix windows memory locking (#111866) Memory locking on Windows with the bundled jdk was broken by native access refactoring. This commit fixes the linking issue, as well as adds a packaging test to ensure memory locking is invoked on all supported platforms. --- docs/changelog/111866.yaml | 6 ++ .../nativeaccess/jdk/JdkKernel32Library.java | 4 +- .../packaging/test/MemoryLockingTests.java | 59 +++++++++++++++++++ 3 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/111866.yaml create mode 100644 qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java diff --git a/docs/changelog/111866.yaml b/docs/changelog/111866.yaml new file mode 100644 index 0000000000000..34bf56da4dc9e --- /dev/null +++ b/docs/changelog/111866.yaml @@ -0,0 +1,6 @@ +pr: 111866 +summary: Fix windows memory locking +area: Infra/Core +type: bug +issues: + - 111847 diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java index a3ddc0d59890d..0294b721aa6a8 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java @@ -56,7 +56,7 @@ class JdkKernel32Library implements Kernel32Library { ); private static final MethodHandle SetProcessWorkingSetSize$mh = downcallHandleWithError( "SetProcessWorkingSetSize", - FunctionDescriptor.of(ADDRESS, JAVA_LONG, JAVA_LONG) + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_LONG, JAVA_LONG) ); private static final MethodHandle GetCompressedFileSizeW$mh = downcallHandleWithError( "GetCompressedFileSizeW", @@ -115,7 +115,7 @@ static class JdkAddress implements Address { @Override public Address add(long offset) { - return new JdkAddress(MemorySegment.ofAddress(address.address())); + return new JdkAddress(MemorySegment.ofAddress(address.address() + offset)); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java new file mode 100644 index 0000000000000..82a17c54b6d69 --- /dev/null +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.ServerUtils; +import org.elasticsearch.packaging.util.docker.DockerRun; + +import java.util.Map; + +import static org.elasticsearch.packaging.util.docker.Docker.runContainer; +import static org.elasticsearch.packaging.util.docker.DockerRun.builder; + +public class MemoryLockingTests extends PackagingTestCase { + + public void test10Install() throws Exception { + install(); + } + + public void test20MemoryLockingEnabled() throws Exception { + configureAndRun( + Map.of( + "bootstrap.memory_lock", + "true", + "xpack.security.enabled", + "false", + "xpack.security.http.ssl.enabled", + "false", + "xpack.security.enrollment.enabled", + "false", + "discovery.type", + "single-node" + ) + ); + // TODO: verify locking worked. logs? check memory of process? at least we know the process started successfully + stopElasticsearch(); + } + + public void configureAndRun(Map<String, String> settings) throws Exception { + if (distribution().isDocker()) { + DockerRun builder = builder(); + settings.forEach(builder::envVar); + runContainer(distribution(), builder); + } else { + + for (var setting : settings.entrySet()) { + ServerUtils.addSettingToExistingConfiguration(installation.config, setting.getKey(), setting.getValue()); + } + ServerUtils.removeSettingFromExistingConfiguration(installation.config, "cluster.initial_master_nodes"); + } + + startElasticsearch(); + } +} From ffc22b2a8a503e387cc029df632a97e93f453566 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Thu, 15 Aug 2024 12:14:29 -0700 Subject: [PATCH 059/389] Add audit_unenrolled_* attributes to fleet-agents template (#111909) --- .../template-resources/src/main/resources/fleet-agents.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json index ad66ad8796862..8b1c13f3152e8 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json @@ -319,6 +319,12 @@ }, "namespaces": { "type": "keyword" + }, + "audit_unenrolled_time": { + "type": "date" + }, + "audit_unenrolled_reason": { + "type": "keyword" } } } From 445be157665c1831fa6a83f510d697d02b3afff7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 16 Aug 2024 15:21:52 +1000 Subject: [PATCH 060/389] Mute org.elasticsearch.xpack.test.rest.XPackRestIT org.elasticsearch.xpack.test.rest.XPackRestIT #111944 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 996f1e699c403..22adc4a8c44b5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -185,6 +185,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT issue: https://github.com/elastic/elasticsearch/issues/111923 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + issue: https://github.com/elastic/elasticsearch/issues/111944 # Examples: # From 1e40fe45d638017c797d81f2bdb294aea6365be1 Mon Sep 17 00:00:00 2001 From: Pius Date: Sat, 17 Aug 2024 05:52:26 -0700 Subject: [PATCH 061/389] Add 8.15.0 known issue for memory locking in Windows (#111949) --- docs/reference/release-notes/8.15.0.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git
a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 1df0969ecc629..395073683b102 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -32,6 +32,11 @@ Rollup:: Search:: * Change `skip_unavailable` remote cluster setting default value to true {es-pull}105792[#105792] +[[known-issues-8.15.0]] +[float] +=== Known issues +* Elasticsearch will not start on Windows machines when the recommended [bootstrap.memory_lock: true](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock) setting is configured due to [native access refactoring](https://github.com/elastic/elasticsearch/pull/111866). The workaround for 8.15.0 is to downgrade to the previous version. This issue will be fixed in 8.15.1. + [[bug-8.15.0]] [float] === Bug fixes From 1fbd7e990b14006e2d10bd15f22c7a6791169c17 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 06:19:04 +0100 Subject: [PATCH 062/389] Test get-snapshots API with missing details (#111903) Extends the test added in #111786 to check that the API still works correctly even in the BwC case that the details needed are not in the `RepositoryData` and must be read from the individual `SnapshotInfo` blobs. --- .../snapshots/GetSnapshotsIT.java | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 66ddd47d7758d..477fd9737394e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -8,8 +8,12 @@ package org.elasticsearch.snapshots; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; @@ -23,17 +27,30 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Predicates; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; +import 
org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; @@ -819,6 +836,17 @@ public void testAllFeatures() { } }); + if (randomBoolean()) { + // Sometimes also simulate bwc repository contents where some details are missing from the root blob + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (final var repositoryName : randomSubsetOf(repositories)) { + removeDetailsForRandomSnapshots(repositoryName, listeners.acquire()); + } + } + }); + } + Predicate<SnapshotInfo> snapshotInfoPredicate = Predicates.always(); // {repository} path parameter @@ -1000,4 +1028,102 @@ public void testAllFeatures() { assertEquals(0, remaining); } + + /** + * Older versions of Elasticsearch don't record in {@link RepositoryData} all the details needed for the get-snapshots API to pick out + * the right snapshots, so in this case the API must fall back to reading those details from each candidate {@link SnapshotInfo} blob. + * Simulate this situation by manipulating the {@link RepositoryData} blob directly to remove all the optional details from some subset + * of its snapshots. + */ + private static void removeDetailsForRandomSnapshots(String repositoryName, ActionListener<Void> listener) { + final Set<SnapshotId> snapshotsWithoutDetails = ConcurrentCollections.newConcurrentSet(); + final var masterRepositoriesService = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class); + final var repository = asInstanceOf(FsRepository.class, masterRepositoriesService.repository(repositoryName)); + final var repositoryMetadata = repository.getMetadata(); + final var repositorySettings = repositoryMetadata.settings(); + final var repositoryDataBlobPath = asInstanceOf(FsBlobStore.class, repository.blobStore()).path() + .resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryMetadata.generation()); + + SubscribableListener + + // unregister the repository while we're mucking around with its internals + .<AcknowledgedResponse>newForked( + l -> client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName), + l + ) + ) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + // rewrite the RepositoryData blob with some details removed + .andThenAccept(ignored -> { + // load the existing RepositoryData JSON blob as raw maps/lists/etc.
+ final var repositoryDataBytes = Files.readAllBytes(repositoryDataBlobPath); + final var repositoryDataMap = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + repositoryDataBytes, + 0, + repositoryDataBytes.length, + true + ); + + // modify the contents + final var snapshotsList = asInstanceOf(List.class, repositoryDataMap.get("snapshots")); + for (final var snapshotObj : snapshotsList) { + if (randomBoolean()) { + continue; + } + final var snapshotMap = asInstanceOf(Map.class, snapshotObj); + snapshotsWithoutDetails.add( + new SnapshotId( + asInstanceOf(String.class, snapshotMap.get("name")), + asInstanceOf(String.class, snapshotMap.get("uuid")) + ) + ); + + // remove the optional details fields + assertNotNull(snapshotMap.remove("start_time_millis")); + assertNotNull(snapshotMap.remove("end_time_millis")); + assertNotNull(snapshotMap.remove("slm_policy")); + } + + // overwrite the RepositoryData JSON blob with its new contents + final var updatedRepositoryDataBytes = XContentTestUtils.convertToXContent(repositoryDataMap, XContentType.JSON); + try (var outputStream = Files.newOutputStream(repositoryDataBlobPath)) { + BytesRef bytesRef; + final var iterator = updatedRepositoryDataBytes.iterator(); + while ((bytesRef = iterator.next()) != null) { + outputStream.write(bytesRef.bytes, bytesRef.offset, bytesRef.length); + } + } + }) + + // re-register the repository + .andThen( + l -> client().execute( + TransportPutRepositoryAction.TYPE, + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName).type(FsRepository.TYPE) + .settings(repositorySettings), + l + ) + ) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + // verify that the details are indeed now missing + .andThen( + l -> masterRepositoriesService.repository(repositoryName).getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, l) + ) + .andThenAccept(repositoryData -> { + for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { + assertEquals( + repositoryName + "/" + snapshotId.toString() + ": " + repositoryData.getSnapshotDetails(snapshotId), + snapshotsWithoutDetails.contains(snapshotId), + repositoryData.hasMissingDetails(snapshotId) + ); + } + }) + + .addListener(listener); + } } From a406333f8750ec76be11f96881295a9049ecdde4 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 06:20:19 +0100 Subject: [PATCH 063/389] Revert "Add 8.15.0 known issue for memory locking in Windows (#111949)" This reverts commit 1e40fe45d638017c797d81f2bdb294aea6365be1. --- docs/reference/release-notes/8.15.0.asciidoc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 395073683b102..1df0969ecc629 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -32,11 +32,6 @@ Rollup:: Search:: * Change `skip_unavailable` remote cluster setting default value to true {es-pull}105792[#105792] -[[known-issues-8.15.0]] -[float] -=== Known issues -* Elasticsearch will not start on Windows machines when the recommended [bootstrap.memory_lock: true](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock) setting is configured due to [native access refactoring](https://github.com/elastic/elasticsearch/pull/111866). The workaround for 8.15.0 is to downgrade to the previous version. This issue will be fixed in 8.15.1. 
- [[bug-8.15.0]] [float] === Bug fixes From fe7448e4e5ca3d41eb2371ea037c60df4a692f4b Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 06:32:18 +0100 Subject: [PATCH 064/389] Introduce `StreamingXContentResponse` (#111933) Similar to `ChunkedZipResponse` (#109820) this utility allows Elasticsearch to send an `XContent`-based response constructed out of a sequence of `ChunkedToXContent` fragments, provided in a streaming and asynchronous fashion. This will enable #93735 to proceed without needing to create a temporary index to hold the intermediate results. --- .../rest/StreamingXContentResponseIT.java | 300 ++++++++++++ .../rest/StreamingXContentResponse.java | 435 ++++++++++++++++++ 2 files changed, 735 insertions(+) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java create mode 100644 server/src/main/java/org/elasticsearch/rest/StreamingXContentResponse.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java new file mode 100644 index 0000000000000..ae91caea888db --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java @@ -0,0 +1,300 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest; + +import org.apache.http.ConnectionClosedException; +import org.apache.http.HttpResponse; +import org.apache.http.nio.ContentDecoder; +import org.apache.http.nio.IOControl; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.http.protocol.HttpContext; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collection; +import 
java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static org.hamcrest.Matchers.hasSize; + +@ESIntegTestCase.ClusterScope(numDataNodes = 1) +public class StreamingXContentResponseIT extends ESIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), RandomXContentResponsePlugin.class); + } + + public static class RandomXContentResponsePlugin extends Plugin implements ActionPlugin { + + public static final String ROUTE = "/_random_xcontent_response"; + + public static final String INFINITE_ROUTE = "/_random_infinite_xcontent_response"; + + public final AtomicReference responseRef = new AtomicReference<>(); + + public record Response(Map fragments, CountDownLatch completedLatch) {} + + @Override + public Collection getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of( + // handler that returns a normal (finite) response + new RestHandler() { + @Override + public List routes() { + return List.of(new Route(RestRequest.Method.GET, ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws IOException { + final var response = new Response(new HashMap<>(), new CountDownLatch(1)); + final var entryCount = between(0, 10000); + for (int i = 0; i < entryCount; i++) { + response.fragments().put(randomIdentifier(), randomIdentifier()); + } + assertTrue(responseRef.compareAndSet(null, response)); + handleStreamingXContentRestRequest( + channel, + client.threadPool(), + response.completedLatch(), + response.fragments().entrySet().iterator() + ); + } + }, + + // handler that just keeps on yielding chunks until aborted + new RestHandler() { + @Override + public List routes() { + return List.of(new Route(RestRequest.Method.GET, INFINITE_ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws IOException { + final var response = new Response(new HashMap<>(), new CountDownLatch(1)); + assertTrue(responseRef.compareAndSet(null, new Response(null, response.completedLatch()))); + handleStreamingXContentRestRequest(channel, client.threadPool(), response.completedLatch(), new Iterator<>() { + + private long id; + + // carry on yielding content even after the channel closes + private final Semaphore trailingContentPermits = new Semaphore(between(0, 20)); + + @Override + public boolean hasNext() { + return request.getHttpChannel().isOpen() || trailingContentPermits.tryAcquire(); + } + + @Override + public Map.Entry next() { + return new Map.Entry<>() { + private final String key = Long.toString(id++); + private final String content = randomIdentifier(); + + @Override + public String getKey() { + return key; + } + + @Override + public String getValue() { + return content; + } + + 
@Override + public String setValue(String value) { + return fail(null, "must not setValue"); + } + }; + } + }); + } + } + ); + } + + private static void handleStreamingXContentRestRequest( + RestChannel channel, + ThreadPool threadPool, + CountDownLatch completionLatch, + Iterator> fragmentIterator + ) throws IOException { + try (var refs = new RefCountingRunnable(completionLatch::countDown)) { + final var streamingXContentResponse = new StreamingXContentResponse(channel, channel.request(), refs.acquire()); + streamingXContentResponse.writeFragment(p -> ChunkedToXContentHelper.startObject(), refs.acquire()); + final var finalRef = refs.acquire(); + ThrottledIterator.run( + fragmentIterator, + (ref, fragment) -> randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()).execute( + ActionRunnable.run(ActionListener.releaseAfter(refs.acquireListener(), ref), () -> { + Thread.yield(); + streamingXContentResponse.writeFragment( + p -> ChunkedToXContentHelper.field(fragment.getKey(), fragment.getValue()), + refs.acquire() + ); + }) + ), + between(1, 10), + () -> {}, + () -> { + try (streamingXContentResponse; finalRef) { + streamingXContentResponse.writeFragment(p -> ChunkedToXContentHelper.endObject(), refs.acquire()); + } + } + ); + } + } + } + + public void testRandomStreamingXContentResponse() throws IOException { + final var request = new Request("GET", RandomXContentResponsePlugin.ROUTE); + final var response = getRestClient().performRequest(request); + final var actualEntries = XContentHelper.convertToMap(JsonXContent.jsonXContent, response.getEntity().getContent(), false); + assertEquals(getExpectedEntries(), actualEntries); + } + + public void testAbort() throws IOException { + final var request = new Request("GET", RandomXContentResponsePlugin.INFINITE_ROUTE); + final var responseStarted = new CountDownLatch(1); + final var bodyConsumed = new CountDownLatch(1); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(() -> new HttpAsyncResponseConsumer<>() { + + final ByteBuffer readBuffer = ByteBuffer.allocate(ByteSizeUnit.KB.toIntBytes(4)); + int bytesToConsume = ByteSizeUnit.MB.toIntBytes(1); + + @Override + public void responseReceived(HttpResponse response) { + responseStarted.countDown(); + } + + @Override + public void consumeContent(ContentDecoder decoder, IOControl ioControl) throws IOException { + readBuffer.clear(); + final var bytesRead = decoder.read(readBuffer); + if (bytesRead > 0) { + bytesToConsume -= bytesRead; + } + + if (bytesToConsume <= 0) { + bodyConsumed.countDown(); + ioControl.shutdown(); + } + } + + @Override + public void responseCompleted(HttpContext context) {} + + @Override + public void failed(Exception ex) {} + + @Override + public Exception getException() { + return null; + } + + @Override + public HttpResponse getResult() { + return null; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public void close() {} + + @Override + public boolean cancel() { + return false; + } + })); + + try { + try (var restClient = createRestClient(internalCluster().getRandomNodeName())) { + // one-node REST client to avoid retries + expectThrows(ConnectionClosedException.class, () -> restClient.performRequest(request)); + } + safeAwait(responseStarted); + safeAwait(bodyConsumed); + } finally { + assertNull(getExpectedEntries()); // mainly just checking that all refs are released + } + } + + private static Map getExpectedEntries() { + final List> nodeResponses = StreamSupport + // concatenate 
all the chunks in all the entries + .stream(internalCluster().getInstances(PluginsService.class).spliterator(), false) + .flatMap(p -> p.filterPlugins(RandomXContentResponsePlugin.class)) + .flatMap(p -> { + final var response = p.responseRef.getAndSet(null); + if (response == null) { + return Stream.of(); + } else { + safeAwait(response.completedLatch()); // ensures that all refs have been released + return Stream.of(response.fragments()); + } + }) + .toList(); + assertThat(nodeResponses, hasSize(1)); + return nodeResponses.get(0); + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/StreamingXContentResponse.java b/server/src/main/java/org/elasticsearch/rest/StreamingXContentResponse.java new file mode 100644 index 0000000000000..9f20416ff8b06 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/StreamingXContentResponse.java @@ -0,0 +1,435 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.BytesStream; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.Streams; +import org.elasticsearch.transport.Transports; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Queue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A REST response with an XContent body to which the caller can write fragments of content in an asynchronous and streaming fashion. + *
 * <p>
+ * Callers submit individual fragments of content using {@link #writeFragment}. Internally, the output entries are held in a queue.
+ * If the queue becomes empty then the response transmission is paused until the next entry becomes available.
+ * <p>
+ * The internal queue is unbounded. It is the caller's responsibility to ensure that the response does not consume an excess of resources
+ * while it's being sent.
+ * <p>
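+ * For illustration only (an editorial sketch, not part of the original Javadoc): assuming {@code channel} and {@code params}
+ * come from the enclosing {@code RestHandler}, and using no-op {@link Releasable}s for brevity, a minimal usage looks like:
+ * <pre>{@code
+ * try (var streaming = new StreamingXContentResponse(channel, params, () -> {})) {
+ *     streaming.writeFragment(p -> ChunkedToXContentHelper.startObject(), () -> {});
+ *     // ... submit further fragments, possibly from other threads ...
+ *     streaming.writeFragment(p -> ChunkedToXContentHelper.endObject(), () -> {});
+ * } // close() enqueues the final sentinel fragment
+ * }</pre>
+ * <p>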
+ * The caller must eventually call {@link StreamingXContentResponse#close} to finish the transmission of the response. + */ +public final class StreamingXContentResponse implements Releasable { + + /** + * The underlying stream that collects the raw bytes to be transmitted. Mutable, because we collect the contents of each chunk in a + * distinct stream that is held in this field while that chunk is under construction. + */ + @Nullable // if there's no chunk under construction + private BytesStream targetStream; + + private final XContentBuilder xContentBuilder; + + private final RestChannel restChannel; + private final ToXContent.Params params; + private final Releasable onCompletion; + + /** + * A listener for the next fragment to become available for transmission after a pause. Completed with the newly-created unique active + * {@link AvailableFragmentsResponseBodyPart} within {@link #writeFragment}, and subscribed to via + * {@link AvailableFragmentsResponseBodyPart#getNextPart} when the current {@link AvailableFragmentsResponseBodyPart} + * becomes inactive because of a transmission pause. + */ + @Nullable // if the first fragment hasn't been sent yet + private SubscribableListener nextAvailableFragmentListener; + + /** + * A resource to be released when the transmission of the current fragment is complete. Note that we may complete the transmission of + * multiple fragments at the same time, if they are all processed by one call to {@link AvailableFragmentsResponseBodyPart#encodeChunk} + * and transmitted together. + */ + @Nullable // if not currently sending a fragment + private Releasable currentFragmentReleasable; + + /** + * @param restChannel The {@link RestChannel} on which to send the response. + * @param params The {@link ToXContent.Params} to control the serialization. + * @param onCompletion A resource which is released when the transmission is complete. + */ + public StreamingXContentResponse(RestChannel restChannel, ToXContent.Params params, Releasable onCompletion) throws IOException { + this.restChannel = restChannel; + this.params = params; + this.onCompletion = onCompletion; + this.xContentBuilder = restChannel.newBuilder( + restChannel.request().getXContentType(), + null, + true, + Streams.noCloseStream(new OutputStream() { + @Override + public void write(int b) throws IOException { + assert targetStream != null; + targetStream.write(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + assert targetStream != null; + targetStream.write(b, off, len); + } + }) + ); + } + + /** + * Close this {@link StreamingXContentResponse}, indicating that there will be no more fragments to send. + */ + @Override + public void close() { + writeFragment(p -> NO_MORE_FRAGMENTS, () -> { + if (isRestResponseFinished.compareAndSet(false, true)) { + queueRefs.decRef(); + } + }); + } + + private Iterator getChunksIterator(StreamingFragment fragment) { + return xContentBuilder.getRestApiVersion() == RestApiVersion.V_7 + ? fragment.fragment().toXContentChunkedV7(params) + : fragment.fragment().toXContentChunked(params); + } + + /** + * Enqueue the given fragment for transmission. + * @param fragment The fragment to send. + * @param releasable A resource which is released when the fragment has been completely processed, i.e. when + *
<ul>
+ * <li>it has been fully sent, or</li>
+ * <li>the overall response was cancelled before completion and all resources related to the partial transmission of
+ * this fragment have been released.</li>
+ * </ul>
+ */ + public void writeFragment(ChunkedToXContent fragment, Releasable releasable) { + if (tryAcquireQueueRef()) { + try { + fragmentQueue.add(new StreamingFragment(fragment, releasable)); + if (queueLength.getAndIncrement() == 0) { + // There is no active AvailableChunksZipResponseBodyPart, but there is now an entry in the queue, so we must create a + // AvailableChunksZipResponseBodyPart to process it (along with any other entries that are concurrently added to the + // queue). It's safe to mutate releasable and continuationListener here because they are only otherwise accessed by an + // active AvailableChunksZipResponseBodyPart (which does not exist) or when all queueRefs have been released (which they + // have not here). + final var nextFragment = fragmentQueue.poll(); + assert nextFragment != null; + final var availableFragments = new AvailableFragmentsResponseBodyPart(getChunksIterator(nextFragment)); + assert currentFragmentReleasable == null; + currentFragmentReleasable = nextFragment.releasable(); + final var currentAvailableFragmentListener = nextAvailableFragmentListener; + nextAvailableFragmentListener = new SubscribableListener<>(); + if (currentAvailableFragmentListener == null) { + // We are not resuming after a pause, this is the first fragment to be sent, so we start the response transmission. + restChannel.sendResponse(RestResponse.chunked(RestStatus.OK, availableFragments, this::restResponseFinished)); + } else { + // We are resuming transmission after a pause, so just carry on sending the response body. + assert currentAvailableFragmentListener.isDone() == false; + currentAvailableFragmentListener.onResponse(availableFragments); + } + } + } finally { + queueRefs.decRef(); + } + } else { + Releasables.closeExpectNoException(releasable); + } + } + + /** + * A fragment which is ready for transmission, to be stored in {@link #fragmentQueue}. + * + * @param fragment The fragment of XContent to send. + * @param releasable A resource to release when this fragment has been fully transmitted, or is no longer required because the + * transmission was cancelled. + */ + private record StreamingFragment(ChunkedToXContent fragment, Releasable releasable) {} + + /** + * Queue of fragments that are ready for transmission. + */ + private final Queue fragmentQueue = new LinkedBlockingQueue<>(); + + /** + * Upper bound on the number of fragments in the queue, atomically modified to ensure there's only one thread processing the queue + * at once. + */ + private final AtomicInteger queueLength = new AtomicInteger(); + + /** + * Ref-counting for access to the queue, to avoid clearing the queue on abort concurrently with a fragment being sent. + */ + private final RefCounted queueRefs = AbstractRefCounted.of(this::drainQueue); + + /** + * Flag to indicate if the request has been aborted, at which point we should stop enqueueing more fragments and promptly clean up the + * ones being sent. It's safe to ignore this, but without it in theory a constant stream of calls to {@link #writeFragment} could + * prevent {@link #drainQueue} from running for arbitrarily long. 
+ */ + private final AtomicBoolean isRestResponseFinished = new AtomicBoolean(); + + private boolean tryAcquireQueueRef() { + return isRestResponseFinished.get() == false && queueRefs.tryIncRef(); + } + + private void restResponseFinished() { + assert Transports.assertTransportThread(); + if (isRestResponseFinished.compareAndSet(false, true)) { + queueRefs.decRef(); + } + } + + private void drainQueue() { + assert isRestResponseFinished.get(); + assert queueRefs.hasReferences() == false; + final var taskCount = queueLength.get() + 2 /* currentFragmentReleasable and onCompletion */ ; + final var releasables = new ArrayList(taskCount); + try { + releasables.add(currentFragmentReleasable); + currentFragmentReleasable = null; + StreamingFragment fragment; + while ((fragment = fragmentQueue.poll()) != null) { + releasables.add(fragment.releasable()); + } + assert fragmentQueue.isEmpty() : fragmentQueue.size(); // no concurrent adds + assert releasables.size() == taskCount - 1 || releasables.size() == taskCount - 2 : taskCount + " vs " + releasables.size(); + } finally { + releasables.add(onCompletion); + Releasables.closeExpectNoException(Releasables.wrap(releasables)); + } + } + + /** + * A {@link ChunkedRestResponseBodyPart} which will yield all currently-available fragments by consuming from {@link #fragmentQueue}. + * There is only ever at most one active instance of this class at any time, in the sense that one such instance becoming inactive + * happens-before the creation of the next instance. One of these parts may send chunks for more than one fragment. + */ + private final class AvailableFragmentsResponseBodyPart implements ChunkedRestResponseBodyPart { + + /** + * An iterator over the chunks of the fragment currently being transmitted. + */ + private Iterator fragmentChunksIterator; + + /** + * True when we have run out of chunks ready for immediate transmission, so the response is paused, but we expect to send more data + * later. + */ + private boolean isResponsePaused; + + /** + * True when we have sent the last chunk of the last fragment, or the response was cancelled. + */ + private boolean isResponseComplete; + + /** + * A listener which is created when there are no more available fragments, so transmission is paused, subscribed to in + * {@link #getNextPart}, and then completed with the next body part (sequence of fragments, i.e. a new (unique) active + * {@link AvailableFragmentsResponseBodyPart}). + */ + private SubscribableListener getNextPartListener; + + /** + * A cache for an empty list to be used to collect the {@code Releasable} instances to be released when the next chunk has been + * fully transmitted. It's a list because a call to {@link #encodeChunk} may yield a chunk that completes several fragments, each of + * which has its own resources to release. We cache this value across chunks because most chunks won't release anything, so we can + * keep the empty list around for later to save on allocations. 
+ */ + private ArrayList nextReleasablesCache = new ArrayList<>(); + + AvailableFragmentsResponseBodyPart(Iterator fragmentChunksIterator) { + this.fragmentChunksIterator = fragmentChunksIterator; + } + + /** + * @return whether this part of the response is complete + */ + @Override + public boolean isPartComplete() { + return isResponsePaused || isResponseComplete; + } + + @Override + public boolean isLastPart() { + return isResponseComplete; + } + + @Override + public void getNextPart(ActionListener listener) { + assert getNextPartListener != null; + getNextPartListener.addListener(listener); + } + + /** + * Transfer {@link #currentFragmentReleasable} into the supplied collection (i.e. add it to {@code releasables} and then clear + * {@link #currentFragmentReleasable}). Called when the last chunk of the current fragment is serialized, so that we + * can start serializing chunks of the next fragment straight away whilst delaying the release of the current fragment's resources + * until the transmission of the chunk that is currently under construction. + */ + private void transferCurrentFragmentReleasable(ArrayList releasables) { + assert queueRefs.hasReferences(); + + if (currentFragmentReleasable == null) { + return; + } + + if (releasables == nextReleasablesCache) { + // adding the first value, so we must line up a new cached value for the next caller + nextReleasablesCache = new ArrayList<>(); + } + + releasables.add(currentFragmentReleasable); + currentFragmentReleasable = null; + } + + @Override + public ReleasableBytesReference encodeChunk(int sizeHint, Recycler recycler) throws IOException { + assert Transports.isTransportThread(Thread.currentThread()); + + final ArrayList releasables = nextReleasablesCache; + assert releasables.isEmpty(); + try { + if (tryAcquireQueueRef()) { + try { + assert queueLength.get() > 0; + // This is the current unique active AvailableFragmentsResponseBodyPart (i.e. queueLength is strictly positive and + // we hold a queueRef), so any concurrent calls to writeFragment() at this point will just add to the queue and + // won't spawn a new AvailableFragmentsResponseBodyPart or mutate any fields. + + final RecyclerBytesStreamOutput chunkStream = new RecyclerBytesStreamOutput(recycler); + assert targetStream == null; + targetStream = chunkStream; + + do { + if (fragmentChunksIterator.hasNext()) { + fragmentChunksIterator.next().toXContent(xContentBuilder, params); + } else { + completeCurrentFragment(releasables); + } + } while (isResponseComplete == false && isResponsePaused == false && chunkStream.size() < sizeHint); + + assert (releasables == nextReleasablesCache) == releasables.isEmpty(); + assert nextReleasablesCache.isEmpty(); + + final Releasable chunkStreamReleasable = () -> Releasables.closeExpectNoException(chunkStream); + final var result = new ReleasableBytesReference( + chunkStream.bytes(), + releasables.isEmpty() + ? 
chunkStreamReleasable + : Releasables.wrap(Iterators.concat(Iterators.single(chunkStreamReleasable), releasables.iterator())) + ); + targetStream = null; + return result; + } finally { + queueRefs.decRef(); + } + } else { + // request aborted, nothing more to send (queue is being cleared by queueRefs#closeInternal) + isResponseComplete = true; + return new ReleasableBytesReference(BytesArray.EMPTY, () -> {}); + } + } catch (Exception e) { + logger.error("failure encoding chunk", e); + throw e; + } finally { + if (targetStream != null) { + assert false : "failure encoding chunk"; + IOUtils.closeWhileHandlingException(targetStream, Releasables.wrap(releasables)); + targetStream = null; + } + } + } + + private void completeCurrentFragment(ArrayList<Releasable> releasables) throws IOException { + transferCurrentFragmentReleasable(releasables); + final var localNextAvailableFragmentListener = nextAvailableFragmentListener; // read before queue len decr + final var newQueueLength = queueLength.decrementAndGet(); + if (fragmentChunksIterator == NO_MORE_FRAGMENTS) { + // The current fragment is the last-fragment sentinel, so we stop processing the queue completely. Note + // that closing the XContentBuilder here ensures that the response is well-formed - it's up to the + // caller to ensure this, even if errors occur. + xContentBuilder.close(); + isResponseComplete = true; + } else if (newQueueLength == 0) { + // The current fragment is complete, but the next fragment isn't available yet, so we pause + // transmission. This means we are no longer an active AvailableFragmentsResponseBodyPart, so any + // concurrent calls to writeFragment() at this point will now spawn a new + // AvailableFragmentsResponseBodyPart to take our place. + xContentBuilder.flush(); + isResponsePaused = true; + assert getNextPartListener == null; + assert localNextAvailableFragmentListener != null; + // Calling our getNextPart() will eventually yield the next fragment supplied to writeFragment(): + getNextPartListener = localNextAvailableFragmentListener; + } else { + // The current fragment is complete, and the next fragment is already available, so we start sending its + // chunks too. This means we're still the unique active AvailableFragmentsResponseBodyPart. We re-use + // this AvailableFragmentsResponseBodyPart instance rather than creating a new one to avoid unnecessary + // allocations. + + final var nextFragment = fragmentQueue.poll(); + assert nextFragment != null; + currentFragmentReleasable = nextFragment.releasable(); + fragmentChunksIterator = getChunksIterator(nextFragment); + } + } + + @Override + public String getResponseContentTypeString() { + return xContentBuilder.getResponseContentTypeString(); + } + } + + /** + * Sentinel fragment indicating the end of the response. + */ + private static final Iterator<ToXContent> NO_MORE_FRAGMENTS = new Iterator<>() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public ToXContent next() { + assert false : "not called"; + return ToXContent.EMPTY; + } + }; +} From 1222496cd0f602d7769e7fcdbc59b84685356d6e Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 06:35:52 +0100 Subject: [PATCH 065/389] Improve reaction to blob store corruptions (#111954) Today there are a couple of assertions that can trip if the contents of a snapshot repository are corrupted.
It makes sense to assert the integrity of snapshots in most tests, but we must also (a) protect against these corruptions in production and (b) allow some tests to verify the behaviour of the system when the repository is corrupted. This commit introduces a flag to disable certain assertions, converts the relevant assertions into production failures too, and introduces a high-level test to verify that we do detect all relevant corruptions without tripping any other assertions. Extracted from #93735 as this change makes sense in its own right. Relates #52622. --- docs/reference/release-notes/8.15.0.asciidoc | 2 + .../blobstore/BlobStoreCorruptionIT.java | 186 ++++++++++++++++++ .../BlobStoreIndexShardSnapshot.java | 18 +- .../BlobStoreIndexShardSnapshots.java | 9 +- ...ndexShardSnapshotsIntegritySuppressor.java | 27 +++ .../blobstore/RepositoryFileType.java | 60 ++++++ 6 files changed, 300 insertions(+), 2 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java create mode 100644 test/framework/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshotsIntegritySuppressor.java create mode 100644 test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 1df0969ecc629..80e935e130678 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -16,6 +16,8 @@ after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#11053 * Pipeline aggregations under `time_series` and `categorize_text` aggregations are never returned (issue: {es-issue}111679[#111679]) +* Elasticsearch will not start on Windows machines when the recommended [bootstrap.memory_lock: true](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock) setting is configured due to [native access refactoring](https://github.com/elastic/elasticsearch/pull/111866). The workaround for 8.15.0 is to downgrade to the previous version. This issue will be fixed in 8.15.1. + [[breaking-8.15.0]] [float] === Breaking changes diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java new file mode 100644 index 0000000000000..422696d6b61c6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.blobstore; + +import org.apache.lucene.tests.mockfile.ExtrasFS; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshotsIntegritySuppressor; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; + +public class BlobStoreCorruptionIT extends AbstractSnapshotIntegTestCase { + + private static final Logger logger = LogManager.getLogger(BlobStoreCorruptionIT.class); + + @Before + public void suppressConsistencyCheck() { + disableRepoConsistencyCheck("testing corruption detection involves breaking the repo"); + } + + public void testCorruptionDetection() throws Exception { + final var repositoryName = randomIdentifier(); + final var indexName = randomIdentifier(); + final var snapshotName = randomIdentifier(); + final var repositoryRootPath = randomRepoPath(); + + createRepository(repositoryName, FsRepository.TYPE, repositoryRootPath); + createIndexWithRandomDocs(indexName, between(1, 100)); + flushAndRefresh(indexName); + createSnapshot(repositoryName, snapshotName, List.of(indexName)); + + final var corruptedFile = corruptRandomFile(repositoryRootPath); + final var corruptedFileType = RepositoryFileType.getRepositoryFileType(repositoryRootPath, corruptedFile); + final var corruptionDetectors = new ArrayList<CheckedConsumer<ActionListener<Exception>, ?>>(); + + // detect corruption by listing the snapshots + if (corruptedFileType == RepositoryFileType.SNAPSHOT_INFO) { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> listing snapshots"); + client().admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) + .execute(ActionTestUtils.assertNoSuccessListener(exceptionListener::onResponse)); + }); + } + + // detect corruption by taking another snapshot + if (corruptedFileType == RepositoryFileType.SHARD_GENERATION) { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> taking another snapshot"); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier()) + .setWaitForCompletion(true) + .execute(exceptionListener.map(createSnapshotResponse -> { + assertNotEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); + return new ElasticsearchException("create-snapshot failed as expected"); + })); + }); + } + + // detect corruption by restoring the snapshot + switch (corruptedFileType) { + case SNAPSHOT_INFO, GLOBAL_METADATA, INDEX_METADATA ->
corruptionDetectors.add(exceptionListener -> { + logger.info("--> restoring snapshot"); + client().admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setRestoreGlobalState(corruptedFileType == RepositoryFileType.GLOBAL_METADATA || randomBoolean()) + .setWaitForCompletion(true) + .execute(ActionTestUtils.assertNoSuccessListener(exceptionListener::onResponse)); + }); + case SHARD_SNAPSHOT_INFO, SHARD_DATA -> corruptionDetectors.add(exceptionListener -> { + logger.info("--> restoring snapshot and checking for failed shards"); + SubscribableListener + // if shard-level data is corrupted then the overall restore succeeds but the shard recoveries fail + .newForked(l -> client().admin().indices().prepareDelete(indexName).execute(l)) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + .andThen( + l -> client().admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .execute(l) + ) + + .addListener(exceptionListener.map(restoreSnapshotResponse -> { + assertNotEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); + return new ElasticsearchException("post-restore recoveries failed as expected"); + })); + }); + } + + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + final var exception = safeAwait(randomFrom(corruptionDetectors)); + logger.info(Strings.format("--> corrupted [%s] and caught exception", corruptedFile), exception); + } + } + + private static Path corruptRandomFile(Path repositoryRootPath) throws IOException { + final var corruptedFileType = getRandomCorruptibleFileType(); + final var corruptedFile = getRandomFileToCorrupt(repositoryRootPath, corruptedFileType); + if (randomBoolean()) { + logger.info("--> deleting [{}]", corruptedFile); + Files.delete(corruptedFile); + } else { + corruptFileContents(corruptedFile); + } + return corruptedFile; + } + + private static void corruptFileContents(Path fileToCorrupt) throws IOException { + final var oldFileContents = Files.readAllBytes(fileToCorrupt); + logger.info("--> contents of [{}] before corruption: [{}]", fileToCorrupt, Base64.getEncoder().encodeToString(oldFileContents)); + final byte[] newFileContents = new byte[randomBoolean() ? 
oldFileContents.length : between(0, oldFileContents.length)]; + System.arraycopy(oldFileContents, 0, newFileContents, 0, newFileContents.length); + if (newFileContents.length == oldFileContents.length) { + final var corruptionPosition = between(0, newFileContents.length - 1); + newFileContents[corruptionPosition] = randomValueOtherThan(oldFileContents[corruptionPosition], ESTestCase::randomByte); + logger.info( + "--> updating byte at position [{}] from [{}] to [{}]", + corruptionPosition, + oldFileContents[corruptionPosition], + newFileContents[corruptionPosition] + ); + } else { + logger.info("--> truncating file from length [{}] to length [{}]", oldFileContents.length, newFileContents.length); + } + Files.write(fileToCorrupt, newFileContents); + logger.info("--> contents of [{}] after corruption: [{}]", fileToCorrupt, Base64.getEncoder().encodeToString(newFileContents)); + } + + private static RepositoryFileType getRandomCorruptibleFileType() { + return randomValueOtherThanMany( + // these blob types do not have reliable corruption detection, so we must skip them + t -> t == RepositoryFileType.ROOT_INDEX_N || t == RepositoryFileType.ROOT_INDEX_LATEST, + () -> randomFrom(RepositoryFileType.values()) + ); + } + + private static Path getRandomFileToCorrupt(Path repositoryRootPath, RepositoryFileType corruptedFileType) throws IOException { + final var corruptibleFiles = new ArrayList(); + Files.walkFileTree(repositoryRootPath, new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) throws IOException { + if (ExtrasFS.isExtra(filePath.getFileName().toString()) == false + && RepositoryFileType.getRepositoryFileType(repositoryRootPath, filePath) == corruptedFileType) { + corruptibleFiles.add(filePath); + } + return super.visitFile(filePath, attrs); + } + }); + return randomFrom(corruptibleFiles); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 2a8fe96151c11..817ecb4601d59 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.gateway.CorruptStateException; import org.elasticsearch.index.store.StoreFileMetadata; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; @@ -318,7 +319,11 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } case WRITER_UUID -> { writerUuid = new BytesRef(parser.binaryValue()); - assert writerUuid.length > 0; + assert BlobStoreIndexShardSnapshots.INTEGRITY_ASSERTIONS_ENABLED == false || writerUuid.length > 0; + if (writerUuid.length == 0) { + // we never write UNAVAILABLE_WRITER_UUID, so this must be due to corruption + throw new ElasticsearchParseException("invalid (empty) writer uuid"); + } } default -> XContentParserUtils.throwUnknownField(currentFieldName, parser); } @@ -336,6 +341,12 @@ public static FileInfo fromXContent(XContentParser parser) throws IOException { } else if (checksum == null) { throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); } + try { + // check for 
corruption before asserting writtenBy is parseable in the StoreFileMetadata constructor + org.apache.lucene.util.Version.parse(writtenBy); + } catch (Exception e) { + throw new ElasticsearchParseException("invalid written_by [" + writtenBy + "]"); + } return new FileInfo(name, new StoreFileMetadata(physicalName, length, checksum, writtenBy, metaHash, writerUuid), partSize); } @@ -566,6 +577,11 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th } } + // check for corruption before asserting snapshot != null in the BlobStoreIndexShardSnapshot ctor + if (snapshot == null) { + throw new CorruptStateException("snapshot missing"); + } + return new BlobStoreIndexShardSnapshot( snapshot, indexFiles == null ? List.of() : indexFiles, diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index b17545a4cbeb6..30fbbba5ed095 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -264,6 +264,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + static volatile boolean INTEGRITY_ASSERTIONS_ENABLED = true; + public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { // New parser @@ -317,7 +319,12 @@ public static BlobStoreIndexShardSnapshots fromXContent(XContentParser parser) t List fileInfosBuilder = new ArrayList<>(); for (String file : entry.v2()) { FileInfo fileInfo = files.get(file); - assert fileInfo != null; + if (fileInfo == null) { + // could happen in production if the repo contents are corrupted + final var exception = new IllegalStateException("shard index inconsistent at file [" + file + "]"); + assert INTEGRITY_ASSERTIONS_ENABLED == false : exception; + throw exception; + } fileInfosBuilder.add(fileInfo); } snapshots.add(new SnapshotFiles(entry.v1(), Collections.unmodifiableList(fileInfosBuilder), historyUUIDs.get(entry.v1()))); diff --git a/test/framework/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshotsIntegritySuppressor.java b/test/framework/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshotsIntegritySuppressor.java new file mode 100644 index 0000000000000..511116d9b2125 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshotsIntegritySuppressor.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.snapshots.blobstore; + +import org.elasticsearch.core.Releasable; + +/** + * Test utility class to suppress assertions about the integrity of the contents of a blobstore repository, in order to verify the + * production behaviour on encountering invalid data. 
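+ * <p> + * The class is {@link org.elasticsearch.core.Releasable} so that a test can bound the suppression with try-with-resources, as {@code BlobStoreCorruptionIT} in this change does; a minimal usage sketch (the body comment is illustrative only): + * <pre> + * try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + *     // corrupt the repository and verify that the production corruption checks fire + * } + * </pre>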
+ */ +public class BlobStoreIndexShardSnapshotsIntegritySuppressor implements Releasable { + + public BlobStoreIndexShardSnapshotsIntegritySuppressor() { + BlobStoreIndexShardSnapshots.INTEGRITY_ASSERTIONS_ENABLED = false; + } + + @Override + public void close() { + BlobStoreIndexShardSnapshots.INTEGRITY_ASSERTIONS_ENABLED = true; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java new file mode 100644 index 0000000000000..014cbcd2bcc3a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.blobstore; + +import org.elasticsearch.common.Strings; + +import java.nio.file.Path; +import java.util.regex.Pattern; + +/** + * The types of blobs in a {@link BlobStoreRepository}. + */ +public enum RepositoryFileType { + + ROOT_INDEX_N("index-NUM"), + ROOT_INDEX_LATEST("index.latest"), + SNAPSHOT_INFO("snap-UUID.dat"), + GLOBAL_METADATA("meta-UUID.dat"), + INDEX_METADATA("indices/UUID/meta-SHORTUUID.dat"), + SHARD_GENERATION("indices/UUID/NUM/index-UUID"), + SHARD_SNAPSHOT_INFO("indices/UUID/NUM/snap-UUID.dat"), + SHARD_DATA("indices/UUID/NUM/__UUID"), + // NB no support for legacy names (yet) + ; + + private final Pattern pattern; + + RepositoryFileType(String regex) { + pattern = Pattern.compile( + "^(" + + regex + // decimal numbers + .replace("NUM", "(0|[1-9][0-9]*)") + // 15-byte UUIDS from TimeBasedUUIDGenerator + .replace("SHORTUUID", "[0-9a-zA-Z_-]{20}") + // 16-byte UUIDs from RandomBasedUUIDGenerator + .replace("UUID", "[0-9a-zA-Z_-]{22}") + + ")$" + ); + } + + public static RepositoryFileType getRepositoryFileType(Path repositoryRoot, Path blobPath) { + final var relativePath = repositoryRoot.relativize(blobPath).toString().replace(repositoryRoot.getFileSystem().getSeparator(), "/"); + for (final var repositoryFileType : RepositoryFileType.values()) { + if (repositoryFileType.pattern.matcher(relativePath).matches()) { + return repositoryFileType; + } + } + throw new IllegalArgumentException( + Strings.format("[%s] is not the path of a known blob type within [%s]", relativePath, repositoryRoot) + ); + } + +} From 4e1a84c8831cb73ea11aad1e53b6401e1b89677d Mon Sep 17 00:00:00 2001 From: Gergely Kalapos Date: Mon, 19 Aug 2024 09:24:47 +0200 Subject: [PATCH 066/389] x-pack/plugin/otel: introduce x-pack-otel plugin (#111091) * Add YamlTemplateRegistry and OtelIndexTemplateRegistry with resource YAML files * Fix traces-otel template * Adding first yml tests * Base APMIndexTemplateRegistry on YamlTemplateRegistry * Update OTelPlugin.java * Update APMIndexTemplateRegistry.java * Update YamlIngestPipelineConfig.java * Adding traces tests * Update x-pack/plugin/otel-data/src/main/resources/component-templates/ecs-tsdb@mappings.yaml Co-authored-by: Felix Barnsteiner * Add mapper-version * Fix code-style * Rename `status.status_code` to `status.code` * Update otel@mappings.yaml Revert back to date due to missing support in ES|QL for date_nanos * Move dynamic_templates to 
metrics@mappings in core * Run gradlew :x-pack:plugin:core:spotlessApply * Update x-pack/plugin/otel-data/src/main/resources/component-templates/metrics-otel@mappings.yaml Co-authored-by: Carson Ip * Update 20_metrics_tests.yml Workaround for TSDB timestamp issue: we push a custom template with higher priority and set time_series.start_time. * Update CODEOWNERS Adding obs-ds-intake-services as owner of the new otel-data plugin. Since we had some changes, also updating the owner of apm-data to the same team. * Change dynamic: strict to false * Skip "Reject invalid top level field" test * Update 20_metrics_tests.yml * Add boolean as dimension test (skipping it for now) * Add booleans_to_keywords and enable corresponding test * Remove processor.event top level mapping Reason: for metrics and logs we can rely on the name of the datastream. For spans vs. transactions there are other fields we can use. * Remove booleans_to_keywords Because booleans are supported now as dimension on TSDB * Add alias service.language.name -> telemetry.sdk.language * cleanup * Update README.md * Update README.md * Update docs/changelog/111091.yaml * Move traces@settings and traces@mappings to core * Update traces-otel@mappings.yaml * Review feedback * Adapt `match` style in tests * Update docs/changelog/111091.yaml * Apply suggestions from code review Co-authored-by: Vishal Raj * Update x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml Co-authored-by: Carson Ip * Changing trace_flags to long Related discussion: https://github.com/elastic/elasticsearch/pull/111091#discussion_r1706698491 * Remove trace_flags see: https://github.com/elastic/opentelemetry-dev/pull/368#pullrequestreview-2229633970 * Apply suggestions from code review Co-authored-by: Andrew Wilkins * Review feedback * Add store_array_source for span links * Define constant `data_stream.type` in `template.yaml`s * Create package-info.java * Move ecs-tsdb@mappings to index template Add test to verify that @custom template can add dynamic templates with a higher precedence * Update metrics@mappings.json Remove summary_gauge and summary_counter since they are covered by summary_metrics * Move clusterService.getClusterSettings().addSettingsUpdateConsumer to registry * Fix code-style * Update x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml Co-authored-by: Felix Barnsteiner * Enable logsdb * Update traces@settings.json No lifecycle needed for OTel at this point --------- Co-authored-by: Felix Barnsteiner Co-authored-by: Carson Ip Co-authored-by: Vishal Raj Co-authored-by: Andrew Wilkins Co-authored-by: Elastic Machine Co-authored-by: Felix Barnsteiner --- .github/CODEOWNERS | 8 +- docs/changelog/111091.yaml | 5 + .../apmdata/APMIndexTemplateRegistry.java | 163 ++----------- .../xpack/apmdata/APMPlugin.java | 1 - .../xpack/apmdata/ResourceUtils.java | 43 ---- .../component-templates/traces@mappings.yaml | 11 - .../src/main/resources/resources.yaml | 1 - .../APMIndexTemplateRegistryTests.java | 3 +- .../xpack/core/ClientHelper.java | 1 + .../xpack/core/XPackSettings.java | 7 + .../xpack/core/template/ResourceUtils.java | 44 ++++ .../template}/YamlIngestPipelineConfig.java | 22 +- .../core/template/YamlTemplateRegistry.java | 219 ++++++++++++++++++ .../src/main/resources/metrics@mappings.json | 44 ++++ .../src/main/resources/traces@mappings.json | 28 +++ .../src/main/resources/traces@settings.json | 18 ++ x-pack/plugin/otel-data/README.md | 33 +++ x-pack/plugin/otel-data/build.gradle | 38
+++ .../oteldata/OTelIndexTemplateRegistry.java | 58 +++++ .../xpack/oteldata/OTelPlugin.java | 79 +++++++ .../xpack/oteldata/package-info.java | 17 ++ .../logs-otel@mappings.yaml | 42 ++++ .../metrics-otel@mappings.yaml | 17 ++ .../component-templates/otel@mappings.yaml | 64 +++++ .../semconv-resource-to-ecs@mappings.yaml | 128 ++++++++++ .../traces-otel@mappings.yaml | 68 ++++++ .../index-templates/logs-otel@template.yaml | 27 +++ .../metrics-otel@template.yaml | 36 +++ .../index-templates/traces-otel@template.yaml | 27 +++ .../src/main/resources/resources.yaml | 15 ++ .../xpack/oteldata/OTelYamlTestSuiteIT.java | 54 +++++ .../resources/rest-api-spec/test/10_otel.yml | 38 +++ .../rest-api-spec/test/20_logs.tests.yml | 22 ++ .../rest-api-spec/test/20_metrics_tests.yml | 149 ++++++++++++ .../rest-api-spec/test/20_traces_tests.yml | 94 ++++++++ .../test/30_non_ecs_alias_tests.yml | 37 +++ .../security/authz/AuthorizationUtils.java | 2 + .../xpack/stack/StackTemplateRegistry.java | 22 +- .../stack/StackTemplateRegistryTests.java | 14 +- 39 files changed, 1480 insertions(+), 219 deletions(-) create mode 100644 docs/changelog/111091.yaml delete mode 100644 x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java delete mode 100644 x-pack/plugin/apm-data/src/main/resources/component-templates/traces@mappings.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/ResourceUtils.java rename x-pack/plugin/{apm-data/src/main/java/org/elasticsearch/xpack/apmdata => core/src/main/java/org/elasticsearch/xpack/core/template}/YamlIngestPipelineConfig.java (56%) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java create mode 100644 x-pack/plugin/core/template-resources/src/main/resources/traces@mappings.json create mode 100644 x-pack/plugin/core/template-resources/src/main/resources/traces@settings.json create mode 100644 x-pack/plugin/otel-data/README.md create mode 100644 x-pack/plugin/otel-data/build.gradle create mode 100644 x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java create mode 100644 x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java create mode 100644 x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/package-info.java create mode 100644 x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/component-templates/metrics-otel@mappings.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/component-templates/otel@mappings.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/index-templates/logs-otel@template.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/index-templates/metrics-otel@template.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/index-templates/traces-otel@template.yaml create mode 100644 x-pack/plugin/otel-data/src/main/resources/resources.yaml create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/java/org/elasticsearch/xpack/oteldata/OTelYamlTestSuiteIT.java create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/10_otel.yml 
create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_tests.yml create mode 100644 x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/30_non_ecs_alias_tests.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0f7e3073ed022..5b98444c044d2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -27,8 +27,12 @@ libs/logstash-bridge @elastic/logstash x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @elastic/kibana-security # APM Data index templates, etc. -x-pack/plugin/apm-data/src/main/resources @elastic/apm-server -x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/apm-server +x-pack/plugin/apm-data/src/main/resources @elastic/obs-ds-intake-services +x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services + +# OTel +x-pack/plugin/otel-data/src/main/resources @elastic/obs-ds-intake-services +x-pack/plugin/otel-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services # Delivery gradle @elastic/es-delivery diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml new file mode 100644 index 0000000000000..8444681a14a48 --- /dev/null +++ b/docs/changelog/111091.yaml @@ -0,0 +1,5 @@ +pr: 111091 +summary: "X-pack/plugin/otel: introduce x-pack-otel plugin" +area: Data streams +type: feature +issues: [] diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java index 04b0257f4180a..6f5d4e13dc56b 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java @@ -7,53 +7,24 @@ package org.elasticsearch.xpack.apmdata; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.metadata.ComponentTemplate; -import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Nullable; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.yaml.YamlXContent; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; -import org.elasticsearch.xpack.core.template.IngestPipelineConfig; +import org.elasticsearch.xpack.core.template.YamlTemplateRegistry; -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.apmdata.ResourceUtils.APM_TEMPLATE_VERSION_VARIABLE; -import static 
org.elasticsearch.xpack.apmdata.ResourceUtils.loadResource; -import static org.elasticsearch.xpack.apmdata.ResourceUtils.loadVersionedResourceUTF8; +import static org.elasticsearch.xpack.apmdata.APMPlugin.APM_DATA_REGISTRY_ENABLED; /** * Creates all index templates and ingest pipelines that are required for using Elastic APM. */ -public class APMIndexTemplateRegistry extends IndexTemplateRegistry { - private static final Logger logger = LogManager.getLogger(APMIndexTemplateRegistry.class); - // this node feature is a redefinition of {@link DataStreamFeatures#DATA_STREAM_LIFECYCLE} and it's meant to avoid adding a - // dependency to the data-streams module just for this - public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); - private final int version; +public class APMIndexTemplateRegistry extends YamlTemplateRegistry { - private final Map componentTemplates; - private final Map composableIndexTemplates; - private final List ingestPipelines; - private final FeatureService featureService; - private volatile boolean enabled; + public static final String APM_TEMPLATE_VERSION_VARIABLE = "xpack.apmdata.template.version"; - @SuppressWarnings("unchecked") public APMIndexTemplateRegistry( Settings nodeSettings, ClusterService clusterService, @@ -62,133 +33,29 @@ public APMIndexTemplateRegistry( NamedXContentRegistry xContentRegistry, FeatureService featureService ) { - super(nodeSettings, clusterService, threadPool, client, xContentRegistry); - - try { - final Map apmResources = XContentHelper.convertToMap( - YamlXContent.yamlXContent, - loadResource("/resources.yaml"), - false - ); - version = (((Number) apmResources.get("version")).intValue()); - final List componentTemplateNames = (List) apmResources.get("component-templates"); - final List indexTemplateNames = (List) apmResources.get("index-templates"); - final List ingestPipelineConfigs = (List) apmResources.get("ingest-pipelines"); - - componentTemplates = componentTemplateNames.stream() - .map(o -> (String) o) - .collect(Collectors.toMap(name -> name, name -> loadComponentTemplate(name, version))); - composableIndexTemplates = indexTemplateNames.stream() - .map(o -> (String) o) - .collect(Collectors.toMap(name -> name, name -> loadIndexTemplate(name, version))); - ingestPipelines = ingestPipelineConfigs.stream().map(o -> (Map>) o).map(map -> { - Map.Entry> pipelineConfig = map.entrySet().iterator().next(); - return loadIngestPipeline(pipelineConfig.getKey(), version, (List) pipelineConfig.getValue().get("dependencies")); - }).collect(Collectors.toList()); - this.featureService = featureService; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - public int getVersion() { - return version; - } - - void setEnabled(boolean enabled) { - logger.info("APM index template registry is {}", enabled ? 
"enabled" : "disabled"); - this.enabled = enabled; - } - - public boolean isEnabled() { - return enabled; - } - - public void close() { - clusterService.removeListener(this); - } - - @Override - protected String getOrigin() { - return ClientHelper.APM_ORIGIN; - } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure current version of the components are installed only after versions that support data stream lifecycle - // due to the use of the feature in all the `@lifecycle` component templates - return featureService.clusterHasFeature(event.state(), DATA_STREAM_LIFECYCLE); + super(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService); } @Override - protected boolean requiresMasterNode() { - return true; + public String getName() { + return "apm"; } @Override - protected Map getComponentTemplateConfigs() { - if (enabled) { - return componentTemplates; - } else { - return Map.of(); + public void initialize() { + super.initialize(); + if (isEnabled()) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(APM_DATA_REGISTRY_ENABLED, this::setEnabled); } } @Override - protected Map getComposableTemplateConfigs() { - if (enabled) { - return composableIndexTemplates; - } else { - return Map.of(); - } - } - - @Override - protected List getIngestPipelines() { - if (enabled) { - return ingestPipelines; - } else { - return Collections.emptyList(); - } - } - - private static ComponentTemplate loadComponentTemplate(String name, int version) { - try { - final byte[] content = loadVersionedResourceUTF8("/component-templates/" + name + ".yaml", version); - try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { - return ComponentTemplate.parse(parser); - } - } catch (Exception e) { - throw new RuntimeException("failed to load APM Ingest plugin's component template: " + name, e); - } - } - - private static ComposableIndexTemplate loadIndexTemplate(String name, int version) { - try { - final byte[] content = loadVersionedResourceUTF8("/index-templates/" + name + ".yaml", version); - try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { - return ComposableIndexTemplate.parse(parser); - } - } catch (Exception e) { - throw new RuntimeException("failed to load APM Ingest plugin's index template: " + name, e); - } - } - - private static IngestPipelineConfig loadIngestPipeline(String name, int version, @Nullable List dependencies) { - if (dependencies == null) { - dependencies = Collections.emptyList(); - } - return new YamlIngestPipelineConfig( - name, - "/ingest-pipelines/" + name + ".yaml", - version, - APM_TEMPLATE_VERSION_VARIABLE, - dependencies - ); + protected String getVersionProperty() { + return APM_TEMPLATE_VERSION_VARIABLE; } @Override - protected boolean applyRolloverAfterTemplateV2Upgrade() { - return true; + protected String getOrigin() { + return ClientHelper.APM_ORIGIN; } } diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java index 102b0d38461c3..aefb45f6186c1 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java @@ -60,7 +60,6 @@ public Collection createComponents(PluginServices services) { if (enabled) { APMIndexTemplateRegistry registryInstance = registry.get(); 
registryInstance.setEnabled(APM_DATA_REGISTRY_ENABLED.get(settings)); - clusterService.getClusterSettings().addSettingsUpdateConsumer(APM_DATA_REGISTRY_ENABLED, registryInstance::setEnabled); registryInstance.initialize(); } return Collections.emptyList(); diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java deleted file mode 100644 index 1e6a9a9998a82..0000000000000 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/ResourceUtils.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.apmdata; - -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.xpack.core.template.TemplateUtils; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.util.Map; - -public class ResourceUtils { - - public static final String APM_TEMPLATE_VERSION_VARIABLE = "xpack.apmdata.template.version"; - - static byte[] loadVersionedResourceUTF8(String name, int version) { - return loadVersionedResourceUTF8(name, version, Map.of()); - } - - static byte[] loadVersionedResourceUTF8(String name, int version, Map variables) { - try { - String content = loadResource(name); - content = TemplateUtils.replaceVariables(content, String.valueOf(version), APM_TEMPLATE_VERSION_VARIABLE, variables); - return content.getBytes(StandardCharsets.UTF_8); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - static String loadResource(String name) throws IOException { - InputStream is = APMIndexTemplateRegistry.class.getResourceAsStream(name); - if (is == null) { - throw new IOException("Resource [" + name + "] not found in classpath."); - } - return Streams.readFully(is).utf8ToString(); - } -} diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces@mappings.yaml deleted file mode 100644 index 51c987df4df60..0000000000000 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces@mappings.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -version: ${xpack.apmdata.template.version} -_meta: - description: Default mappings for traces data streams - managed: true -template: - mappings: - properties: - data_stream.type: - type: constant_keyword - value: traces diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index efa6ae694c464..fa38fda679e49 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -23,7 +23,6 @@ component-templates: - metrics-apm.service_summary@mappings - metrics-apm.service_transaction@mappings - metrics-apm.transaction@mappings - - traces@mappings - traces-apm@mappings - traces-apm.rum@mappings diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index e9f0775836c71..1d6faa0f403d4 100644 --- 
a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -75,7 +75,6 @@ public class APMIndexTemplateRegistryTests extends ESTestCase { private APMIndexTemplateRegistry apmIndexTemplateRegistry; private StackTemplateRegistryAccessor stackTemplateRegistryAccessor; - private ClusterService clusterService; private ThreadPool threadPool; private VerifyingClient client; @@ -89,7 +88,7 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); - clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); FeatureService featureService = new FeatureService(List.of(new DataStreamFeatures())); stackTemplateRegistryAccessor = new StackTemplateRegistryAccessor( new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY, featureService) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index d27d7a21ddb73..4e7aa37fe1a0b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -194,6 +194,7 @@ private static String maybeRewriteSingleAuthenticationHeaderForVersion( public static final String CONNECTORS_ORIGIN = "connectors"; public static final String INFERENCE_ORIGIN = "inference"; public static final String APM_ORIGIN = "apm"; + public static final String OTEL_ORIGIN = "otel"; private ClientHelper() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index d33b2aecdab04..f76b0d2bb6d8d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -93,6 +93,13 @@ public Iterator> settings() { /** Setting for enabling or disabling APM Data. Defaults to true. */ public static final Setting APM_DATA_ENABLED = Setting.boolSetting("xpack.apm_data.enabled", true, Setting.Property.NodeScope); + /** Setting for enabling or disabling OTel Data. Defaults to true. */ + public static final Setting OTEL_DATA_ENABLED = Setting.boolSetting( + "xpack.otel_data.enabled", + true, + Setting.Property.NodeScope + ); + /** Setting for enabling or disabling enterprise search. Defaults to true. */ public static final Setting ENTERPRISE_SEARCH_ENABLED = Setting.boolSetting( "xpack.ent_search.enabled", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/ResourceUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/ResourceUtils.java new file mode 100644 index 0000000000000..9840535989a7c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/ResourceUtils.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.template; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +public class ResourceUtils { + static byte[] loadVersionedResourceUTF8(Class<?> clazz, String name, int version, String versionProperty) { + return loadVersionedResourceUTF8(clazz, name, version, versionProperty, Map.of()); + } + + static byte[] loadVersionedResourceUTF8( + Class<?> clazz, + String name, + int version, + String versionProperty, + Map<String, String> variables + ) { + try { + String content = loadResource(clazz, name); + content = TemplateUtils.replaceVariables(content, String.valueOf(version), versionProperty, variables); + return content.getBytes(StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static String loadResource(Class<?> clazz, String name) throws IOException { + InputStream is = clazz.getResourceAsStream(name); + if (is == null) { + throw new IOException("Resource [" + name + "] not found in classpath."); + } + return new String(is.readAllBytes(), java.nio.charset.StandardCharsets.UTF_8); + } + +} diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlIngestPipelineConfig.java similarity index 56% rename from x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlIngestPipelineConfig.java index de1b715dd138d..0cb69b490a73a 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/YamlIngestPipelineConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlIngestPipelineConfig.java @@ -5,23 +5,29 @@ * 2.0.
 */ -package org.elasticsearch.xpack.apmdata; +package org.elasticsearch.xpack.core.template; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.template.IngestPipelineConfig; import java.util.List; -import static org.elasticsearch.xpack.apmdata.ResourceUtils.loadVersionedResourceUTF8; +import static org.elasticsearch.xpack.core.template.ResourceUtils.loadVersionedResourceUTF8; -/** - * An APM-plugin-specific implementation that loads ingest pipelines in yaml format from a local resources repository - */ public class YamlIngestPipelineConfig extends IngestPipelineConfig { - public YamlIngestPipelineConfig(String id, String resource, int version, String versionProperty, List<String> dependencies) { + private final Class<?> clazz; + + public YamlIngestPipelineConfig( + String id, + String resource, + int version, + String versionProperty, + List<String> dependencies, + Class<?> clazz + ) { super(id, resource, version, versionProperty, dependencies); + this.clazz = clazz; } @Override @@ -31,6 +37,6 @@ public XContentType getXContentType() { @Override public BytesReference loadConfig() { - return new BytesArray(loadVersionedResourceUTF8("/ingest-pipelines/" + id + ".yaml", version, variables)); + return new BytesArray(loadVersionedResourceUTF8(clazz, "/ingest-pipelines/" + id + ".yaml", version, versionProperty, variables)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java new file mode 100644 index 0000000000000..7471f722261bf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.template; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.metadata.ComponentTemplate; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.yaml.YamlXContent; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.template.ResourceUtils.loadResource; +import static org.elasticsearch.xpack.core.template.ResourceUtils.loadVersionedResourceUTF8; + +/** + * Creates index templates and ingest pipelines based on YAML files from resources.
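+ * <p> + * Concrete registries provide a {@code /resources.yaml} file on their classpath naming the resources to load. Judging by the parsing in this class, a minimal file would look roughly like the following sketch (the resource names are illustrative, not shipped files): + * <pre> + * version: 1 + * component-templates: + *   - my-component@mappings + * index-templates: + *   - my-index@template + * ingest-pipelines: + *   - my-pipeline: + *       dependencies: [] + * </pre>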
+ */ +public abstract class YamlTemplateRegistry extends IndexTemplateRegistry { + private static final Logger logger = LogManager.getLogger(YamlTemplateRegistry.class); + // this node feature is a redefinition of {@link DataStreamFeatures#DATA_STREAM_LIFECYCLE} and it's meant to avoid adding a + // dependency to the data-streams module just for this + public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); + private final int version; + + private final Map<String, ComponentTemplate> componentTemplates; + private final Map<String, ComposableIndexTemplate> composableIndexTemplates; + private final List<IngestPipelineConfig> ingestPipelines; + private final FeatureService featureService; + private volatile boolean enabled; + + @SuppressWarnings("unchecked") + public YamlTemplateRegistry( + Settings nodeSettings, + ClusterService clusterService, + ThreadPool threadPool, + Client client, + NamedXContentRegistry xContentRegistry, + FeatureService featureService + ) { + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + + try { + final Map<String, Object> resources = XContentHelper.convertToMap( + YamlXContent.yamlXContent, + loadResource(this.getClass(), "/resources.yaml"), + false + ); + version = (((Number) resources.get("version")).intValue()); + + final List<Object> componentTemplateNames = (List<Object>) resources.get("component-templates"); + final List<Object> indexTemplateNames = (List<Object>) resources.get("index-templates"); + final List<Object> ingestPipelineConfigs = (List<Object>) resources.get("ingest-pipelines"); + + componentTemplates = Optional.ofNullable(componentTemplateNames) + .orElse(Collections.emptyList()) + .stream() + .map(o -> (String) o) + .collect(Collectors.toMap(name -> name, name -> loadComponentTemplate(name, version))); + composableIndexTemplates = Optional.ofNullable(indexTemplateNames) + .orElse(Collections.emptyList()) + .stream() + .map(o -> (String) o) + .collect(Collectors.toMap(name -> name, name -> loadIndexTemplate(name, version))); + ingestPipelines = Optional.ofNullable(ingestPipelineConfigs) + .orElse(Collections.emptyList()) + .stream() + .map(o -> (Map<String, Map<String, Object>>) o) + .map(map -> { + Map.Entry<String, Map<String, Object>> pipelineConfig = map.entrySet().iterator().next(); + return loadIngestPipeline( + pipelineConfig.getKey(), + version, + (List<String>) pipelineConfig.getValue().get("dependencies") + ); + }) + .collect(Collectors.toList()); + this.featureService = featureService; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public int getVersion() { + return version; + } + + /** + * @return A friendly, human-readable name of the index template registry + */ + public abstract String getName(); + + public void setEnabled(boolean enabled) { + logger.info("{} index template registry is {}", getName(), enabled ? "enabled" : "disabled"); + this.enabled = enabled; + } + + public boolean isEnabled() { + return enabled; + } + + public void close() { + clusterService.removeListener(this); + } + + @Override + protected boolean isClusterReady(ClusterChangedEvent event) { + // Ensure current version of the components are installed only after versions that support data stream lifecycle + // due to the use of the feature in all the `@lifecycle` component templates + return featureService.clusterHasFeature(event.state(), DATA_STREAM_LIFECYCLE); + } + + @Override + protected boolean requiresMasterNode() { + return true; + } + + @Override + public Map<String, ComponentTemplate> getComponentTemplateConfigs() { + if (enabled) { + return componentTemplates; + } else { + return Map.of(); + } + } + + @Override + public Map<String, ComposableIndexTemplate> getComposableTemplateConfigs() { + if (enabled) { + return composableIndexTemplates; + } else { + return Map.of(); + } + } + + @Override + public List<IngestPipelineConfig> getIngestPipelines() { + if (enabled) { + return ingestPipelines; + } else { + return Collections.emptyList(); + } + } + + protected abstract String getVersionProperty(); + + private ComponentTemplate loadComponentTemplate(String name, int version) { + try { + final byte[] content = loadVersionedResourceUTF8( + this.getClass(), + "/component-templates/" + name + ".yaml", + version, + getVersionProperty() + ); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComponentTemplate.parse(parser); + } + } catch (Exception e) { + throw new RuntimeException("failed to load " + getName() + " Ingest plugin's component template: " + name, e); + } + } + + private ComposableIndexTemplate loadIndexTemplate(String name, int version) { + try { + final byte[] content = loadVersionedResourceUTF8( + this.getClass(), + "/index-templates/" + name + ".yaml", + version, + getVersionProperty() + ); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComposableIndexTemplate.parse(parser); + } + } catch (Exception e) { + throw new RuntimeException("failed to load " + getName() + " Ingest plugin's index template: " + name, e); + } + } + + private IngestPipelineConfig loadIngestPipeline(String name, int version, @Nullable List<String> dependencies) { + if (dependencies == null) { + dependencies = Collections.emptyList(); + } + return new YamlIngestPipelineConfig( + name, + "/ingest-pipelines/" + name + ".yaml", + version, + getVersionProperty(), + dependencies, + this.getClass() + ); + } + + @Override + protected boolean applyRolloverAfterTemplateV2Upgrade() { + return true; + } +} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json index b4aa999697632..9c58322f12d03 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json @@ -43,6 +43,50 @@ "default_metric": "value_count" } } + }, + { + "histogram": { + "mapping": { + "type": "histogram", + "ignore_malformed": true + } + } + }, + { + "counter_long": { + "mapping": { + "type": "long", + "time_series_metric": "counter", + "ignore_malformed": true + } + } + }, + { + "gauge_long": { + "mapping": { + "type": "long", + "time_series_metric": "gauge", + "ignore_malformed": true + } + } + }, + { + "counter_double": { + "mapping": { + "type": "double", + "time_series_metric": "counter", + "ignore_malformed":
true + } + } + }, + { + "gauge_double": { + "mapping": { + "type": "double", + "time_series_metric": "gauge", + "ignore_malformed": true + } + } } ], "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/traces@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/traces@mappings.json new file mode 100644 index 0000000000000..e3990a250f0c2 --- /dev/null +++ b/x-pack/plugin/core/template-resources/src/main/resources/traces@mappings.json @@ -0,0 +1,28 @@ +{ + "template": { + "mappings": { + "date_detection": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "data_stream.type": { + "type": "constant_keyword", + "value": "traces" + }, + "data_stream.dataset": { + "type": "constant_keyword" + }, + "data_stream.namespace": { + "type": "constant_keyword" + } + } + } + }, + "_meta": { + "description": "default mappings for the traces index template installed by x-pack", + "managed": true + }, + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} +} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/traces@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/traces@settings.json new file mode 100644 index 0000000000000..3f4fdba6f4f46 --- /dev/null +++ b/x-pack/plugin/core/template-resources/src/main/resources/traces@settings.json @@ -0,0 +1,18 @@ +{ + "template": { + "settings": { + "index": { + "codec": "best_compression", + "mapping": { + "ignore_malformed": true + } + } + } + }, + "_meta": { + "description": "default settings for the traces index template installed by x-pack", + "managed": true + }, + "version": ${xpack.stack.template.version}, + "deprecated": ${xpack.stack.template.deprecated} +} diff --git a/x-pack/plugin/otel-data/README.md b/x-pack/plugin/otel-data/README.md new file mode 100644 index 0000000000000..7cab6bfa453d8 --- /dev/null +++ b/x-pack/plugin/otel-data/README.md @@ -0,0 +1,33 @@ +## OpenTelemetry Ingest plugin + +The OpenTelemetry Ingest plugin installs index templates and component templates for OpenTelemetry data. + +All resources are defined as YAML under [src/main/resources](src/main/resources). + +The OpenTelemetry index templates rely on mappings from `x-pack-core`. +See [x-pack/plugin/core/src/main/resources](../core/src/main/resources). + +## Adding/Removing/Updating a resource + +All resources are defined as YAML under [src/main/resources](src/main/resources). + +For a resource to be known to the plugin it must be added to +[src/main/resources/resources.yaml](src/main/resources/resources.yaml) in the +appropriate section. + +Any update to resources included by this package also requires a bump to the +`version` property included in the resources file. + +## Testing + +## Integration testing + +The index templates and ingest pipeline functionality is tested using YAML REST tests. +These can be run with: + +``` +./gradlew :x-pack:plugin:otel-data:yamlRestTest +``` + +Refer to the [rest-api-spec documentation](../../../rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc) +for information about writing YAML REST tests. diff --git a/x-pack/plugin/otel-data/build.gradle b/x-pack/plugin/otel-data/build.gradle new file mode 100644 index 0000000000000..f56efe21acccc --- /dev/null +++ b/x-pack/plugin/otel-data/build.gradle @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +apply plugin: 'elasticsearch.internal-es-plugin' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.internal-cluster-test' + +esplugin { + name 'x-pack-otel-data' + description 'The OTEL plugin defines OTEL data streams and ingest pipelines.' + classname 'org.elasticsearch.xpack.oteldata.OTelPlugin' + extendedPlugins = ['x-pack-core'] +} + +dependencies { + compileOnly project(path: xpackModule('core')) + testImplementation project(path: ':x-pack:plugin:stack') + testImplementation(testArtifact(project(xpackModule('core')))) + testImplementation project(':modules:data-streams') + clusterModules project(':modules:data-streams') + clusterModules project(':modules:ingest-common') + clusterModules project(':modules:ingest-geoip') + clusterModules project(':modules:ingest-user-agent') + clusterModules project(':modules:lang-mustache') + clusterModules project(':modules:mapper-extras') + clusterModules project(xpackModule('analytics')) + clusterModules project(xpackModule('ilm')) + clusterModules project(xpackModule('mapper-aggregate-metric')) + clusterModules project(xpackModule('mapper-constant-keyword')) + clusterModules project(xpackModule('mapper-counted-keyword')) + clusterModules project(xpackModule('stack')) + clusterModules project(xpackModule('wildcard')) + clusterModules project(xpackModule('mapper-version')) +} diff --git a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java new file mode 100644 index 0000000000000..435530542c857 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.oteldata;
+
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.features.FeatureService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xpack.core.ClientHelper;
+import org.elasticsearch.xpack.core.template.YamlTemplateRegistry;
+
+import static org.elasticsearch.xpack.oteldata.OTelPlugin.OTEL_DATA_REGISTRY_ENABLED;
+
+public class OTelIndexTemplateRegistry extends YamlTemplateRegistry {
+
+    public static final String OTEL_TEMPLATE_VERSION_VARIABLE = "xpack.oteldata.template.version";
+
+    public OTelIndexTemplateRegistry(
+        Settings nodeSettings,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        Client client,
+        NamedXContentRegistry xContentRegistry,
+        FeatureService featureService
+    ) {
+        super(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService);
+    }
+
+    @Override
+    public void initialize() {
+        super.initialize();
+        if (isEnabled()) {
+            clusterService.getClusterSettings().addSettingsUpdateConsumer(OTEL_DATA_REGISTRY_ENABLED, this::setEnabled);
+        }
+    }
+
+    @Override
+    protected String getOrigin() {
+        return ClientHelper.OTEL_ORIGIN;
+    }
+
+    @Override
+    public String getName() {
+        return "OpenTelemetry";
+    }
+
+    @Override
+    protected String getVersionProperty() {
+        return OTEL_TEMPLATE_VERSION_VARIABLE;
+    }
+}
diff --git a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java
new file mode 100644
index 0000000000000..cece2b5373631
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.oteldata;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.xpack.core.XPackSettings;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+public class OTelPlugin extends Plugin implements ActionPlugin {
+    private static final Logger logger = LogManager.getLogger(OTelPlugin.class);
+
+    final SetOnce<OTelIndexTemplateRegistry> registry = new SetOnce<>();
+
+    private final boolean enabled;
+
+    // OTEL_DATA_REGISTRY_ENABLED controls enabling the index template registry.
+    //
+    // This setting will be ignored if the plugin is disabled.
+    static final Setting<Boolean> OTEL_DATA_REGISTRY_ENABLED = Setting.boolSetting(
+        "xpack.otel_data.registry.enabled",
+        // OTel-data is under development, and we start with opt-in first.
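+        // For illustration only (a sketch, not part of this change): the flag is
+        // declared dynamic, so a yamlRestTest-style step could flip it at runtime:
+        //   - do:
+        //       cluster.put_settings:
+        //         body:
+        //           persistent:
+        //             xpack.otel_data.registry.enabled: true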
+        // Furthermore, this could help with staged rollout in serverless
+        false,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    public OTelPlugin(Settings settings) {
+        this.enabled = XPackSettings.OTEL_DATA_ENABLED.get(settings);
+    }
+
+    @Override
+    public Collection<?> createComponents(PluginServices services) {
+        logger.info("OTel ingest plugin is {}", enabled ? "enabled" : "disabled");
+        Settings settings = services.environment().settings();
+        ClusterService clusterService = services.clusterService();
+        registry.set(
+            new OTelIndexTemplateRegistry(
+                settings,
+                clusterService,
+                services.threadPool(),
+                services.client(),
+                services.xContentRegistry(),
+                services.featureService()
+            )
+        );
+        if (enabled) {
+            OTelIndexTemplateRegistry registryInstance = registry.get();
+            registryInstance.setEnabled(OTEL_DATA_REGISTRY_ENABLED.get(settings));
+            registryInstance.initialize();
+        }
+        return Collections.emptyList();
+    }
+
+    @Override
+    public void close() {
+        registry.get().close();
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(OTEL_DATA_REGISTRY_ENABLED);
+    }
+}
diff --git a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/package-info.java b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/package-info.java
new file mode 100644
index 0000000000000..98c6c9a3999c4
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/package-info.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+/**
+ * This package contains index templates for OpenTelemetry data. It covers traces (spans), metrics, and logs.
+ * The plugin is expected to be used in combination with the Elasticsearch exporter configured within an
+ * OpenTelemetry collector with the mapping mode `otel`.
+ * For more information about the Elasticsearch exporter, see
+ * <a href="https://github.com/open-telemetry/opentelemetry-collector-contrib">
+ * https://github.com/open-telemetry/opentelemetry-collector-contrib</a>.
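+ * <p>
+ * For illustration only, a collector configuration using this mapping mode might declare the
+ * exporter roughly as below; the endpoint is a placeholder and the option names are assumptions
+ * to verify against the exporter's documentation:
+ * <pre>
+ * exporters:
+ *   elasticsearch:
+ *     endpoints: ["https://localhost:9200"]
+ *     mapping:
+ *       mode: otel
+ * </pre>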
+ *
+ */
+package org.elasticsearch.xpack.oteldata;
diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml
new file mode 100644
index 0000000000000..a0971f45ccf4f
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml
@@ -0,0 +1,42 @@
+---
+version: ${xpack.oteldata.template.version}
+_meta:
+  description: Default mappings for OpenTelemetry logs index template installed by x-pack
+  managed: true
+template:
+  settings:
+    index:
+      mode: logsdb
+      sort:
+        field: [ "resource.attributes.host.name" ]
+  mappings:
+    properties:
+      data_stream.type:
+        type: constant_keyword
+        value: logs
+      observed_timestamp:
+        type: date_nanos
+      severity_number:
+        type: byte
+      severity_text:
+        type: keyword
+      log.level:
+        type: alias
+        path: severity_text
+      body_text:
+        type: match_only_text
+      message:
+        type: alias
+        path: body_text
+      body_structured:
+        type: flattened
+      trace_id:
+        type: keyword
+      trace.id:
+        type: alias
+        path: trace_id
+      span_id:
+        type: keyword
+      span.id:
+        type: alias
+        path: span_id
diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/metrics-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/metrics-otel@mappings.yaml
new file mode 100644
index 0000000000000..b7a17dba973f8
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/metrics-otel@mappings.yaml
@@ -0,0 +1,17 @@
+version: ${xpack.oteldata.template.version}
+_meta:
+  description: Default mappings for the OpenTelemetry metrics index template installed by x-pack
+  managed: true
+template:
+  mappings:
+    properties:
+      start_timestamp:
+        type: date_nanos
+      metrics:
+        type: passthrough
+        dynamic: true
+        priority: 1
+      unit:
+        type: keyword
+        time_series_dimension: true
+        ignore_above: 1024
diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/otel@mappings.yaml
new file mode 100644
index 0000000000000..fad85661203d6
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/otel@mappings.yaml
@@ -0,0 +1,64 @@
+---
+version: ${xpack.oteldata.template.version}
+_meta:
+  description: Default mappings for all OpenTelemetry data streams
+  managed: true
+template:
+  mappings:
+    date_detection: false
+    dynamic: false
+    properties:
+      "@timestamp":
+# Ultimately we aim to use date_nanos.
Waiting for https://github.com/elastic/elasticsearch/issues/109352 + type: date + data_stream.type: + type: constant_keyword + data_stream.dataset: + type: constant_keyword + data_stream.namespace: + type: constant_keyword + attributes: + type: passthrough + dynamic: true + priority: 10 + time_series_dimension: true + dropped_attributes_count: + type: long + scope: + properties: + name: + type: keyword + ignore_above: 1024 + version: + type: version + schema_url: + type: keyword + ignore_above: 1024 + dropped_attributes_count: + type: long + attributes: + type: passthrough + dynamic: true + priority: 20 + time_series_dimension: true + resource: + properties: + schema_url: + type: keyword + ignore_above: 1024 + dropped_attributes_count: + type: long + attributes: + type: passthrough + dynamic: true + priority: 30 + time_series_dimension: true + dynamic_templates: + - complex_attributes: + path_match: + - resource.attributes.* + - scope.attributes.* + - attributes.* + match_mapping_type: object + mapping: + type: flattened diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml new file mode 100644 index 0000000000000..711f72ae95220 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml @@ -0,0 +1,128 @@ +--- +version: ${xpack.oteldata.template.version} +_meta: + description: Aliases from OpenTelemetry SemConv fields to ECS (and some non-ECS) fields + managed: true +template: + mappings: + properties: + resource: + properties: + attributes: + type: passthrough + dynamic: true + priority: 30 + time_series_dimension: true + properties: + host.name: + type: keyword + ignore_above: 1024 + telemetry.sdk.language: + type: keyword + ignore_above: 1024 + service.instance.id: + type: keyword + ignore_above: 1024 + deployment.environment: + type: keyword + ignore_above: 1024 + cloud.platform: + type: keyword + ignore_above: 1024 + container.image.tags: + type: keyword + ignore_above: 1024 + host.arch: + type: keyword + ignore_above: 1024 + process.executable.path: + type: keyword + ignore_above: 1024 + process.runtime.name: + type: keyword + ignore_above: 1024 + process.runtime.version: + type: keyword + ignore_above: 1024 + os.name: + type: keyword + ignore_above: 1024 + os.type: + type: keyword + ignore_above: 1024 + os.description: + type: keyword + ignore_above: 1024 + os.version: + type: keyword + ignore_above: 1024 + k8s.deployment.name: + type: keyword + ignore_above: 1024 + k8s.namespace.name: + type: keyword + ignore_above: 1024 + k8s.node.name: + type: keyword + ignore_above: 1024 + k8s.pod.name: + type: keyword + ignore_above: 1024 + k8s.pod.uid: + type: keyword + ignore_above: 1024 + service.node.name: + type: alias + path: resource.attributes.service.instance.id + service.environment: + type: alias + path: resource.attributes.deployment.environment + cloud.service.name: + type: alias + path: resource.attributes.cloud.platform + container.image.tag: + type: alias + path: resource.attributes.container.image.tags + host.architecture: + type: alias + path: resource.attributes.host.arch + process.executable: + type: alias + path: resource.attributes.process.executable.path + service.runtime.name: + type: alias + path: resource.attributes.process.runtime.name + service.runtime.version: + type: alias + path: resource.attributes.process.runtime.version + host.os.name: + 
type: alias + path: resource.attributes.os.name + host.os.platform: + type: alias + path: resource.attributes.os.type + host.os.full: + type: alias + path: resource.attributes.os.description + host.os.version: + type: alias + path: resource.attributes.os.version + kubernetes.deployment.name: + type: alias + path: resource.attributes.k8s.deployment.name + kubernetes.namespace: + type: alias + path: resource.attributes.k8s.namespace.name + kubernetes.node.name: + type: alias + path: resource.attributes.k8s.node.name + kubernetes.pod.name: + type: alias + path: resource.attributes.k8s.pod.name + kubernetes.pod.uid: + type: alias + path: resource.attributes.k8s.pod.uid +# Below are non-ECS fields that may be used by Kibana. + service.language.name: + type: alias + path: resource.attributes.telemetry.sdk.language diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml new file mode 100644 index 0000000000000..a4c62efeed7a4 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml @@ -0,0 +1,68 @@ +--- +version: ${xpack.oteldata.template.version} +_meta: + description: Default mappings for OpenTelemetry traces + managed: true +template: + settings: + index: + mode: logsdb + sort: + field: [ "resource.attributes.host.name" ] + mappings: + _source: + mode: synthetic + properties: + trace_id: + type: keyword + trace.id: + type: alias + path: trace_id + span_id: + type: keyword + span.id: + type: alias + path: span_id + trace_state: + type: keyword + parent_span_id: + type: keyword + parent.id: + type: alias + path: parent_span_id + name: + type: keyword + span.name: + type: alias + path: name + kind: + type: keyword + duration: + type: long + meta: + unit: nanos + dropped_events_count: + type: long + links: + store_array_source: true + properties: + trace_id: + type: keyword + span_id: + type: keyword + trace_state: + type: keyword + attributes: + type: object + subobjects: false + dynamic: true + dropped_attributes_count: + type: long + dropped_links_count: + type: long + status: + properties: + message: + type: keyword + code: + type: keyword diff --git a/x-pack/plugin/otel-data/src/main/resources/index-templates/logs-otel@template.yaml b/x-pack/plugin/otel-data/src/main/resources/index-templates/logs-otel@template.yaml new file mode 100644 index 0000000000000..6772ec5bc65d4 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/index-templates/logs-otel@template.yaml @@ -0,0 +1,27 @@ +--- +version: ${xpack.oteldata.template.version} +index_patterns: ["logs-*.otel-*"] +priority: 120 +data_stream: {} +allow_auto_create: true +_meta: + description: default OpenTelemetry logs template installed by x-pack + managed: true +composed_of: + - logs@mappings + - logs@settings + - otel@mappings + - logs-otel@mappings + - semconv-resource-to-ecs@mappings + - logs@custom + - logs-otel@custom + - ecs@mappings +ignore_missing_component_templates: + - logs@custom + - logs-otel@custom +template: + mappings: + properties: + data_stream.type: + type: constant_keyword + value: logs diff --git a/x-pack/plugin/otel-data/src/main/resources/index-templates/metrics-otel@template.yaml b/x-pack/plugin/otel-data/src/main/resources/index-templates/metrics-otel@template.yaml new file mode 100644 index 0000000000000..89ff28249aabb --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/index-templates/metrics-otel@template.yaml 
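The `semconv-resource-to-ecs@mappings` aliases above let ECS-style field names resolve to the
underlying OTel resource attributes at query time. A rough sketch in the same yamlRestTest style
(index name, document, and the `Ubuntu` value are invented for illustration) would read the same
value through either name:

```yaml
# Hypothetical assertions: "host.os.name" is an alias for
# "resource.attributes.os.name", so both paths return the same value.
- do:
    search:
      index: metrics-generic.otel-default
      body:
        fields: ["host.os.name", "resource.attributes.os.name"]
- length: { hits.hits: 1 }
- match: { hits.hits.0.fields.host\.os\.name: ["Ubuntu"] }
- match: { hits.hits.0.fields.resource\.attributes\.os\.name: ["Ubuntu"] }
```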
@@ -0,0 +1,36 @@ +--- +version: ${xpack.oteldata.template.version} +index_patterns: ["metrics-*.otel-*"] +priority: 120 +data_stream: {} +allow_auto_create: true +_meta: + description: default OpenTelemetry metrics template installed by x-pack + managed: true +composed_of: + - metrics@mappings + - metrics@tsdb-settings + - otel@mappings + - metrics-otel@mappings + - semconv-resource-to-ecs@mappings + - metrics@custom + - metrics-otel@custom +ignore_missing_component_templates: + - metrics@custom + - metrics-otel@custom +template: + settings: + index: + mode: time_series + mappings: + properties: + data_stream.type: + type: constant_keyword + value: metrics + dynamic_templates: + - all_strings_to_keywords: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + diff --git a/x-pack/plugin/otel-data/src/main/resources/index-templates/traces-otel@template.yaml b/x-pack/plugin/otel-data/src/main/resources/index-templates/traces-otel@template.yaml new file mode 100644 index 0000000000000..370b9351c16f5 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/index-templates/traces-otel@template.yaml @@ -0,0 +1,27 @@ +--- +version: ${xpack.oteldata.template.version} +index_patterns: ["traces-*.otel-*"] +priority: 120 +data_stream: {} +allow_auto_create: true +_meta: + description: default OpenTelemetry traces template installed by x-pack + managed: true +composed_of: + - traces@mappings + - traces@settings + - otel@mappings + - traces-otel@mappings + - semconv-resource-to-ecs@mappings + - traces@custom + - traces-otel@custom + - ecs@mappings +ignore_missing_component_templates: + - traces@custom + - traces-otel@custom +template: + mappings: + properties: + data_stream.type: + type: constant_keyword + value: traces diff --git a/x-pack/plugin/otel-data/src/main/resources/resources.yaml b/x-pack/plugin/otel-data/src/main/resources/resources.yaml new file mode 100644 index 0000000000000..8e0a7606cbd05 --- /dev/null +++ b/x-pack/plugin/otel-data/src/main/resources/resources.yaml @@ -0,0 +1,15 @@ +# "version" holds the version of the templates and ingest pipelines installed +# by xpack-plugin otel-data. This must be increased whenever an existing template is +# changed, in order for it to be updated on Elasticsearch upgrade. +version: 1 + +component-templates: + - otel@mappings + - logs-otel@mappings + - semconv-resource-to-ecs@mappings + - metrics-otel@mappings + - traces-otel@mappings +index-templates: + - logs-otel@template + - metrics-otel@template + - traces-otel@template diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/java/org/elasticsearch/xpack/oteldata/OTelYamlTestSuiteIT.java b/x-pack/plugin/otel-data/src/yamlRestTest/java/org/elasticsearch/xpack/oteldata/OTelYamlTestSuiteIT.java new file mode 100644 index 0000000000000..4a5f7d03b12a2 --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/java/org/elasticsearch/xpack/oteldata/OTelYamlTestSuiteIT.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.oteldata; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; + +public class OTelYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("constant-keyword") + .module("counted-keyword") + .module("data-streams") + .module("ingest-common") + .module("ingest-geoip") + .module("ingest-user-agent") + .module("lang-mustache") + .module("mapper-extras") + .module("wildcard") + .module("x-pack-analytics") + .module("x-pack-otel-data") + .module("x-pack-aggregate-metric") + .module("x-pack-ilm") + .module("x-pack-stack") + .module("mapper-version") + .setting("ingest.geoip.downloader.enabled", "false") + .setting("xpack.otel_data.registry.enabled", "true") + .build(); + + public OTelYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/10_otel.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/10_otel.yml new file mode 100644 index 0000000000000..72b7a127dcd02 --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/10_otel.yml @@ -0,0 +1,38 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test traces-otel* template installation": + - skip: + reason: contains is a newly added assertion + features: contains + - do: + indices.get_index_template: + name: traces-otel* + - length: {index_templates: 1} + - contains: {index_templates: {name: traces-otel@template}} + +--- +"Test metrics-otel* template installation": + - skip: + reason: contains is a newly added assertion + features: contains + - do: + indices.get_index_template: + name: metrics-otel* + - length: {index_templates: 1} + - contains: {index_templates: {name: metrics-otel@template}} + +--- +"Test logs-otel* template installation": + - skip: + reason: contains is a newly added assertion + features: contains + - do: + indices.get_index_template: + name: logs-otel* + - length: {index_templates: 1} + - contains: {index_templates: {name: logs-otel@template}} diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml new file mode 100644 index 0000000000000..d87c2a80deab8 --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs.tests.yml @@ -0,0 +1,22 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid +--- +"Default data_stream.type must be logs": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default"}, "attributes": { "foo": "bar"}, "body_text":"Error: Unable to connect to the 
database.","severity_text":"ERROR","severity_number":3,"trace_id":"abc123xyz456def789ghi012jkl345"}'
+  - is_false: errors
+  - do:
+      search:
+        index: logs-generic.otel-default
+        body:
+          fields: ["data_stream.type"]
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields.data_stream\.type: ["logs"] }
diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml
new file mode 100644
index 0000000000000..a6591d6c32210
--- /dev/null
+++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_tests.yml
@@ -0,0 +1,149 @@
+---
+setup:
+  - do:
+      cluster.health:
+        wait_for_events: languid
+  - do:
+      cluster.put_component_template:
+        name: metrics-otel@custom
+        body:
+          template:
+            settings:
+              index:
+                routing_path: [unit, attributes.*, resource.attributes.*]
+                mode: time_series
+                time_series:
+                  start_time: 2024-07-01T13:03:08.138Z
+---
+"Test push service overview metric":
+  - do:
+      indices.get_index_template:
+        name: metrics-otel@template
+  - length: {index_templates: 1}
+  - do:
+      bulk:
+        index: metrics-generic.otel-default
+        refresh: true
+        body:
+          - create: {}
+          - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"},"dropped_attributes_count":0},"attributes":{"processor.event":"metric"}}'
+  - is_false: errors
+  - do:
+      search:
+        index: metrics-generic.otel-default
+        body:
+          fields: ["service.name", "telemetry.sdk.language", "telemetry.sdk.name" ]
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields.service\.name: [ "OtelSample" ] }
+  - match: { hits.hits.0.fields.telemetry\.sdk\.language: [ "dotnet" ] }
+  - match: { hits.hits.0.fields.telemetry\.sdk\.name: [ "opentelemetry" ] }
+---
+"Query resource attributes as top level":
+  - do:
+      bulk:
+        index: metrics-generic.otel-default
+        refresh: true
+        body:
+          - create: {}
+          - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"processor":{"event":"metric"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"},"dropped_attributes_count":0}}'
+  - is_false: errors
+  - do:
+      search:
+        index: metrics-generic.otel-default
+        body:
+          fields: ["service.name", "telemetry.sdk.language", "telemetry.sdk.name"]
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields.service\.name: [ "OtelSample" ] }
+  - match: { hits.hits.0.fields.telemetry\.sdk\.language: [ "dotnet" ] }
+  - match: { hits.hits.0.fields.telemetry\.sdk\.name: [ "opentelemetry" ] }
+---
+"Query attributes as top level":
+  - do:
+      bulk:
+        index: metrics-generic.otel-default
+        refresh: true
+        body:
+          - create: {}
+          - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"attributes":{"processor.event":"metric", "foo": "bar"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"},"dropped_attributes_count":0}}'
+  - is_false: errors
+  - do:
+      search:
+        index: metrics-generic.otel-default
+        body:
+          fields: ["foo"]
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0.fields.foo: ["bar"] }
+---
+"Boolean as dimension":
+  - do:
+      bulk:
+        index:
metrics-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"attributes":{"processor.event":"metric","transaction.root":false},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}}}' + - is_false: errors + - do: + search: + index: metrics-generic.otel-default + body: + fields: ["transaction.root"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.transaction\.root: [false] } +--- +"Default data_stream.type must be metrics": + - do: + bulk: + index: metrics-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default"},"attributes":{"processor.event":"metric","transaction.root":false},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}}}' + - is_false: errors + - do: + search: + index: metrics-generic.otel-default + body: + fields: ["data_stream.type"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.data_stream\.type: ["metrics"] } + +--- +"Custom dynamic template": + - do: + cluster.put_component_template: + name: metrics-otel@custom + body: + template: + settings: + index: + routing_path: [unit, attributes.*, resource.attributes.*] + mode: time_series + time_series: + start_time: 2024-07-01T13:03:08.138Z + mappings: + dynamic_templates: + - ip_fields: + mapping: + type: ip + match_mapping_type: string + path_match: "*.ip" + - do: + bulk: + index: metrics-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default"},"attributes":{"host.ip":"127.0.0.1","foo":"bar"}}' + - is_false: errors + - do: + indices.get_data_stream: + name: metrics-generic.otel-default + - set: { data_streams.0.indices.0.index_name: idx0name } + + - do: + indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.attributes.properties.host\.ip.type: 'ip' } + - match: { .$idx0name.mappings.properties.attributes.properties.foo.type: "keyword" } diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_tests.yml new file mode 100644 index 0000000000000..abdb8d49d774c --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_tests.yml @@ -0,0 +1,94 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid +--- +"Test pushing simple trace": + - do: + bulk: + index: traces-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-02-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","type":"traces","namespace":"default"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}},"name":"foo","trace_id":"7bba9f33312b3dbb8b2c2c62bb7abe2d","span_id":"086e83747d0e381e","kind":"SERVER","status":{"code":"2xx"}}' + - is_false: errors + - do: + search: + index: traces-generic.otel-default + - length: { hits.hits: 1 } + +--- +"Query resource attributes as top level": + - do: + bulk: + index: traces-generic.otel-default + refresh: true + body: + - create: {} + - 
'{"@timestamp":"2024-02-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","type":"traces","namespace":"default"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}},"name":"foo","trace_id":"7bba9f33312b3dbb8b2c2c62bb7abe2d","span_id":"086e83747d0e381e","kind":"SERVER","status":{"code":"2xx"}}' + - is_false: errors + - do: + search: + index: traces-generic.otel-default + body: + fields: ["service.name", "telemetry.sdk.language", "telemetry.sdk.name" ] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.service\.name: [ "OtelSample" ] } + - match: { hits.hits.0.fields.telemetry\.sdk\.language: [ "dotnet" ] } + - match: { hits.hits.0.fields.telemetry\.sdk\.name: [ "opentelemetry" ] } +--- +"Query attributes as top level": + - do: + bulk: + index: traces-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-02-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","type":"traces","namespace":"default"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}},"attributes":{"db.type":"mssql","db.name":"foo","db.operation":"SELECT","db.statement":"SELECT * FROM wuser_table"},"name":"foo","trace_id":"7bba9f33312b3dbb8b2c2c62bb7abe2d","span_id":"086e83747d0e381e","kind":"SERVER","status":{"code":"2xx"}}' + - is_false: errors + - do: + search: + index: traces-generic.otel-default + body: + fields: ["db.type", "db.name", "db.operation", "db.statement"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.db\.type: [ "mssql" ] } + - match: { hits.hits.0.fields.db\.operation: [ "SELECT" ] } + - match: { hits.hits.0.fields.db\.statement: [ "SELECT * FROM wuser_table" ] } +--- +"Span links test": + - do: + bulk: + index: traces-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-02-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","type":"traces","namespace":"default"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}},"attributes":{"db.type":"mssql","db.name":"foo","db.operation":"SELECT","db.statement":"SELECT * FROM wuser_table"},"links":[{"trace_id":"4aaa9f33312b3dbb8b2c2c62bb7abe1a1","span_id":"086e83747d0e381e","attributes":{"foo":"bar"}},{"trace_id":"4aaa9f33312b3dbb8b2c2c62bb7abe1a1","span_id":"b3b7d1f1f1b4e1e1"}],"name":"foo","trace_id":"7bba9f33312b3dbb8b2c2c62bb7abe2d","span_id":"086e83747d0e381e","kind":"SERVER","status":{"code":"2xx"}}' + - is_false: errors + - do: + search: + index: traces-generic.otel-default + - length: { hits.hits.0._source.links: 2 } + - match: { hits.hits.0._source.links.0.trace_id: "4aaa9f33312b3dbb8b2c2c62bb7abe1a1" } + - match: { hits.hits.0._source.links.0.span_id: "086e83747d0e381e" } + - match: { hits.hits.0._source.links.0.attributes.foo: "bar" } + - match: { hits.hits.0._source.links.1.trace_id: "4aaa9f33312b3dbb8b2c2c62bb7abe1a1" } + - match: { hits.hits.0._source.links.1.span_id: "b3b7d1f1f1b4e1e1" } +--- +"Default data_stream.type must be traces": + - do: + bulk: + index: traces-generic.otel-default + refresh: true + body: + - create: {} + - 
'{"@timestamp":"2024-02-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","type":"traces","namespace":"default"},"resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"}},"name":"foo","trace_id":"7bba9f33312b3dbb8b2c2c62bb7abe2d","span_id":"086e83747d0e381e","kind":"SERVER","status":{"code":"2xx"}}' + - is_false: errors + - do: + search: + index: traces-generic.otel-default + body: + fields: ["data_stream.type"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.data_stream\.type: ["traces"] } diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/30_non_ecs_alias_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/30_non_ecs_alias_tests.yml new file mode 100644 index 0000000000000..d80c52c756b54 --- /dev/null +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/30_non_ecs_alias_tests.yml @@ -0,0 +1,37 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + - do: + cluster.put_component_template: + name: metrics-otel@custom + body: + template: + settings: + index: + routing_path: [unit, attributes.*, resource.attributes.*] + mode: time_series + time_series: + start_time: 2024-07-01T13:03:08.138Z +--- +"Test alias from service.language.name non-ecs field to telemetry.sdk.language": + - do: + indices.get_index_template: + name: metrics-otel@template + - length: {index_templates: 1} + - do: + bulk: + index: metrics-generic.otel-default + refresh: true + body: + - create: {} + - '{"@timestamp":"2024-07-18T14:48:33.467654000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"}, "attributes": {"processor.event":"metric"}, "resource":{"attributes":{"service.name":"OtelSample","telemetry.sdk.language":"dotnet","telemetry.sdk.name":"opentelemetry"},"dropped_attributes_count":0}}' + - is_false: errors + - do: + search: + index: metrics-generic.otel-default + body: + fields: ["service.language.name"] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.service\.language\.name: [ "dotnet" ] } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 194440722545a..4173f3db45409 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -39,6 +39,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.LOGSTASH_MANAGEMENT_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.OTEL_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.PROFILING_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ROLLUP_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SEARCHABLE_SNAPSHOTS_ORIGIN; @@ -151,6 +152,7 @@ public static void switchUserBasedOnActionOriginAndExecute( case INGEST_ORIGIN: case PROFILING_ORIGIN: case APM_ORIGIN: + case OTEL_ORIGIN: case STACK_ORIGIN: case SEARCHABLE_SNAPSHOTS_ORIGIN: case LOGSTASH_MANAGEMENT_ORIGIN: diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java 
b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 6a9936f4f27d3..9e847455d2c86 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -48,7 +48,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { // The stack template registry version. This number must be incremented when we make changes // to built-in templates. - public static final int REGISTRY_VERSION = 12; + public static final int REGISTRY_VERSION = 13; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; public static final Setting STACK_TEMPLATES_ENABLED = Setting.boolSetting( @@ -106,6 +106,12 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { public static final String METRICS_ILM_POLICY_NAME = "metrics@lifecycle"; public static final String METRICS_INDEX_TEMPLATE_NAME = "metrics"; + ////////////////////////////////////////////////////////// + // Base traces components + ////////////////////////////////////////////////////////// + public static final String TRACES_MAPPINGS_COMPONENT_TEMPLATE_NAME = "traces@mappings"; + public static final String TRACES_SETTINGS_COMPONENT_TEMPLATE_NAME = "traces@settings"; + ////////////////////////////////////////////////////////// // Synthetics components (for matching synthetics-*-* indices) ////////////////////////////////////////////////////////// @@ -192,6 +198,20 @@ private Map loadComponentTemplateConfigs(boolean logs TEMPLATE_VERSION_VARIABLE, ADDITIONAL_TEMPLATE_VARIABLES ), + new IndexTemplateConfig( + TRACES_SETTINGS_COMPONENT_TEMPLATE_NAME, + "/traces@settings.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), + new IndexTemplateConfig( + TRACES_MAPPINGS_COMPONENT_TEMPLATE_NAME, + "/traces@mappings.json", + REGISTRY_VERSION, + TEMPLATE_VERSION_VARIABLE, + ADDITIONAL_TEMPLATE_VARIABLES + ), new IndexTemplateConfig( SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, "/synthetics@mappings.json", diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index abb2d5765b128..25ff3b5311fa2 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -430,10 +430,12 @@ public void testSameOrHigherVersionTemplateNotUpgraded() { versions.put(StackTemplateRegistry.SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION); versions.put(StackTemplateRegistry.SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION); versions.put(StackTemplateRegistry.KIBANA_REPORTING_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION); + versions.put(StackTemplateRegistry.TRACES_MAPPINGS_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION); + versions.put(StackTemplateRegistry.TRACES_SETTINGS_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION); ClusterChangedEvent sameVersionEvent = createClusterChangedEvent(versions, nodes); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComponentTemplateAction) { - fail("template should not have been re-installed"); + if (request instanceof 
PutComponentTemplateAction.Request put) { + fail("template should not have been re-installed: " + put.name()); return null; } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test @@ -489,6 +491,14 @@ public void testSameOrHigherVersionTemplateNotUpgraded() { StackTemplateRegistry.KIBANA_REPORTING_COMPONENT_TEMPLATE_NAME, StackTemplateRegistry.REGISTRY_VERSION + randomIntBetween(1, 1000) ); + versions.put( + StackTemplateRegistry.TRACES_MAPPINGS_COMPONENT_TEMPLATE_NAME, + StackTemplateRegistry.REGISTRY_VERSION + randomIntBetween(1, 1000) + ); + versions.put( + StackTemplateRegistry.TRACES_SETTINGS_COMPONENT_TEMPLATE_NAME, + StackTemplateRegistry.REGISTRY_VERSION + randomIntBetween(1, 1000) + ); ClusterChangedEvent higherVersionEvent = createClusterChangedEvent(versions, nodes); registry.clusterChanged(higherVersionEvent); } From 69f454370ac13663742128d39fb001e0ea1991c6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 08:26:16 +0100 Subject: [PATCH 067/389] Fix known issue docs for #111866 (#111956) The `known-issue-8.15.0` anchor appears twice which breaks the docs build. Also the existing message suggests incorrectly that `bootstrap.memory_lock: true` is recommended. --- docs/reference/release-notes/8.15.0.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 80e935e130678..e2314381a4b06 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -16,7 +16,11 @@ after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#11053 * Pipeline aggregations under `time_series` and `categorize_text` aggregations are never returned (issue: {es-issue}111679[#111679]) -* Elasticsearch will not start on Windows machines when the recommended [bootstrap.memory_lock: true](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock) setting is configured due to [native access refactoring](https://github.com/elastic/elasticsearch/pull/111866). The workaround for 8.15.0 is to downgrade to the previous version. This issue will be fixed in 8.15.1. +* Elasticsearch will not start on Windows machines if +[`bootstrap.memory_lock` is set to `true`](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock). 
+Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the +recommendation in the manual to entirely disable swap instead of using the +memory lock feature (issue: {es-issue}111847[#111847]) [[breaking-8.15.0]] [float] From 24b852b3d64239882ab898b9e241a9b69a084109 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 19 Aug 2024 11:28:01 +0300 Subject: [PATCH 068/389] Clean the last traces from global retention in templates (#111669) --- .../org/elasticsearch/TransportVersions.java | 2 + .../get/GetComponentTemplateAction.java | 53 ++++++++++------- .../get/GetComposableIndexTemplateAction.java | 57 ++++++++++++------- .../TransportGetComponentTemplateAction.java | 11 +--- ...sportGetComposableIndexTemplateAction.java | 11 +--- .../post/SimulateIndexTemplateResponse.java | 31 ++++------ .../TransportSimulateIndexTemplateAction.java | 15 ++--- .../post/TransportSimulateTemplateAction.java | 13 +---- .../rest/action/cat/RestTemplatesAction.java | 2 +- .../GetComponentTemplateResponseTests.java | 19 ++----- ...tComposableIndexTemplateResponseTests.java | 36 ++++++++++-- 11 files changed, 132 insertions(+), 118 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1995c430472ba..fd3a3d8672966 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -190,6 +190,8 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); + public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); + /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index da588cbadc0d8..f0552cc3226f5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -121,8 +121,6 @@ public static class Response extends ActionResponse implements ToXContentObject private final Map componentTemplates; @Nullable private final RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; public Response(StreamInput in) throws IOException { super(in); @@ -132,29 +130,39 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - globalRetention = in.readOptionalWriteable(DataStreamGlobalRetention::read); - } else { - globalRetention = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); } } - public Response(Map componentTemplates, RolloverConfiguration rolloverConfiguration) { - this(componentTemplates, rolloverConfiguration, null); - } - + /** + * Please use {@link GetComponentTemplateAction.Response#Response(Map)} + */ + @Deprecated public Response(Map componentTemplates, @Nullable DataStreamGlobalRetention globalRetention) { - this(componentTemplates, null, globalRetention); + this(componentTemplates, (RolloverConfiguration) null); } + /** + * Please use {@link GetComponentTemplateAction.Response#Response(Map, RolloverConfiguration)} + */ + @Deprecated public Response( Map componentTemplates, @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention + @Nullable DataStreamGlobalRetention ignored ) { + this(componentTemplates, rolloverConfiguration); + } + + public Response(Map componentTemplates) { + this(componentTemplates, (RolloverConfiguration) null); + } + + public Response(Map componentTemplates, @Nullable RolloverConfiguration rolloverConfiguration) { this.componentTemplates = componentTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public Map getComponentTemplates() { @@ -165,8 +173,14 @@ public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } + /** + * @return null + * @deprecated The global retention is not used anymore in the component template response + */ + @Deprecated + @Nullable public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; + return null; } @Override @@ -175,8 +189,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + 
out.writeOptionalWriteable(null); } } @@ -186,13 +201,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Response that = (Response) o; return Objects.equals(componentTemplates, that.componentTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { - return Objects.hash(componentTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(componentTemplates, rolloverConfiguration); } @Override @@ -212,5 +226,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index e40977a382ba1..ba07c87e753e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -122,8 +122,6 @@ public static class Response extends ActionResponse implements ToXContentObject private final Map indexTemplates; @Nullable private final RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; public Response(StreamInput in) throws IOException { super(in); @@ -133,37 +131,57 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - globalRetention = in.readOptionalWriteable(DataStreamGlobalRetention::read); - } else { - globalRetention = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); } } + /** + * Please use {@link GetComposableIndexTemplateAction.Response#Response(Map)} + */ public Response(Map indexTemplates, @Nullable DataStreamGlobalRetention globalRetention) { - this(indexTemplates, null, globalRetention); - } - - public Response(Map indexTemplates) { - this(indexTemplates, null, null); + this(indexTemplates, (RolloverConfiguration) null); } + /** + * Please use {@link GetComposableIndexTemplateAction.Response#Response(Map, RolloverConfiguration)} + */ + @Deprecated public Response( Map indexTemplates, @Nullable RolloverConfiguration rolloverConfiguration, @Nullable DataStreamGlobalRetention globalRetention ) { + this(indexTemplates, rolloverConfiguration); + } + + public Response(Map indexTemplates) { + this(indexTemplates, (RolloverConfiguration) null); + } + + public Response(Map indexTemplates, @Nullable RolloverConfiguration rolloverConfiguration) { this.indexTemplates = indexTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public Map indexTemplates() { return indexTemplates; } + /** + * @return null + * @deprecated global retention is not used in composable templates anymore + */ + @Deprecated + @Nullable public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; + return null; + } + + @Nullable + public RolloverConfiguration getRolloverConfiguration() { + return 
rolloverConfiguration; } @Override @@ -172,8 +190,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + out.writeOptionalWriteable(null); } } @@ -182,14 +201,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetComposableIndexTemplateAction.Response that = (GetComposableIndexTemplateAction.Response) o; - return Objects.equals(indexTemplates, that.indexTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + return Objects.equals(indexTemplates, that.indexTemplates) && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { - return Objects.hash(indexTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(indexTemplates, rolloverConfiguration); } @Override @@ -207,7 +224,5 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index 1739b279014ee..fcc053b8181fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComponentTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -36,7 +35,6 @@ public class TransportGetComponentTemplateAction extends TransportMasterNodeRead GetComponentTemplateAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; @Inject public TransportGetComponentTemplateAction( @@ -44,8 +42,7 @@ public TransportGetComponentTemplateAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionProvider globalRetentionResolver + IndexNameExpressionResolver indexNameExpressionResolver ) { super( GetComponentTemplateAction.NAME, @@ -59,7 +56,6 @@ public TransportGetComponentTemplateAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -100,12 +96,11 @@ protected void masterOperation( listener.onResponse( new GetComponentTemplateAction.Response( results, - 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetentionResolver.provide() + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new GetComponentTemplateAction.Response(results, globalRetentionResolver.provide())); + listener.onResponse(new GetComponentTemplateAction.Response(results)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index 6ccaad593a448..e2ce172a1bf0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -36,7 +35,6 @@ public class TransportGetComposableIndexTemplateAction extends TransportMasterNo GetComposableIndexTemplateAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; @Inject public TransportGetComposableIndexTemplateAction( @@ -44,8 +42,7 @@ public TransportGetComposableIndexTemplateAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionProvider globalRetentionResolver + IndexNameExpressionResolver indexNameExpressionResolver ) { super( GetComposableIndexTemplateAction.NAME, @@ -59,7 +56,6 @@ public TransportGetComposableIndexTemplateAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -98,12 +94,11 @@ protected void masterOperation( listener.onResponse( new GetComposableIndexTemplateAction.Response( results, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetentionResolver.provide() + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new GetComposableIndexTemplateAction.Response(results, globalRetentionResolver.provide())); + listener.onResponse(new GetComposableIndexTemplateAction.Response(results)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index a2fe2e5056c4d..a27defd2c655c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -46,27 +46,19 @@ public class SimulateIndexTemplateResponse extends ActionResponse implements ToX @Nullable private final 
RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; - public SimulateIndexTemplateResponse( - @Nullable Template resolvedTemplate, - @Nullable Map> overlappingTemplates, - DataStreamGlobalRetention globalRetention - ) { - this(resolvedTemplate, overlappingTemplates, null, globalRetention); + public SimulateIndexTemplateResponse(@Nullable Template resolvedTemplate, @Nullable Map> overlappingTemplates) { + this(resolvedTemplate, overlappingTemplates, null); } public SimulateIndexTemplateResponse( @Nullable Template resolvedTemplate, @Nullable Map> overlappingTemplates, - @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention + @Nullable RolloverConfiguration rolloverConfiguration ) { this.resolvedTemplate = resolvedTemplate; this.overlappingTemplates = overlappingTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public RolloverConfiguration getRolloverConfiguration() { @@ -89,9 +81,10 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null; - globalRetention = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - ? in.readOptionalWriteable(DataStreamGlobalRetention::read) - : null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); + } } @Override @@ -110,8 +103,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + out.writeOptionalWriteable(null); } } @@ -147,13 +141,12 @@ public boolean equals(Object o) { SimulateIndexTemplateResponse that = (SimulateIndexTemplateResponse) o; return Objects.equals(resolvedTemplate, that.resolvedTemplate) && Objects.deepEquals(overlappingTemplates, that.overlappingTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { - return Objects.hash(resolvedTemplate, overlappingTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(resolvedTemplate, overlappingTemplates, rolloverConfiguration); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 911648d06faa8..6fcaad47e0d72 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -16,8 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import 
org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -74,7 +72,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final Set indexSettingProviders; private final ClusterSettings clusterSettings; private final boolean isDslOnlyMode; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; @Inject public TransportSimulateIndexTemplateAction( @@ -87,8 +84,7 @@ public TransportSimulateIndexTemplateAction( NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, - IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionProvider globalRetentionResolver + IndexSettingProviders indexSettingProviders ) { super( SimulateIndexTemplateAction.NAME, @@ -108,7 +104,6 @@ public TransportSimulateIndexTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -118,7 +113,6 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - final DataStreamGlobalRetention globalRetention = globalRetentionResolver.provide(); final ClusterState stateWithTemplate; if (request.getIndexTemplateRequest() != null) { // we'll "locally" add the template defined by the user in the cluster state (as if it existed in the system) @@ -144,7 +138,7 @@ protected void masterOperation( String matchingTemplate = findV2Template(stateWithTemplate.metadata(), request.getIndexName(), false); if (matchingTemplate == null) { - listener.onResponse(new SimulateIndexTemplateResponse(null, null, null)); + listener.onResponse(new SimulateIndexTemplateResponse(null, null)); return; } @@ -172,12 +166,11 @@ protected void masterOperation( new SimulateIndexTemplateResponse( template, overlapping, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetention + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping, globalRetention)); + listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 511efe072960d..ead00dc858a47 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -15,8 +15,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import 
org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -60,7 +58,6 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final Set indexSettingProviders; private final ClusterSettings clusterSettings; private final boolean isDslOnlyMode; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; @Inject public TransportSimulateTemplateAction( @@ -73,8 +70,7 @@ public TransportSimulateTemplateAction( NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, - IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionProvider globalRetentionResolver + IndexSettingProviders indexSettingProviders ) { super( SimulateTemplateAction.NAME, @@ -94,7 +90,6 @@ public TransportSimulateTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -104,7 +99,6 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - final DataStreamGlobalRetention globalRetention = globalRetentionResolver.provide(); String uuid = UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); final String temporaryIndexName = "simulate_template_index_" + uuid; final ClusterState stateWithTemplate; @@ -182,12 +176,11 @@ protected void masterOperation( new SimulateIndexTemplateResponse( template, overlapping, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetention + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping, globalRetention)); + listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java index 849e2d68cb2dc..876edad49a7dc 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java @@ -76,7 +76,7 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient getComposableTemplatesRequest, getComposableTemplatesStep.delegateResponse((l, e) -> { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - l.onResponse(new GetComposableIndexTemplateAction.Response(Map.of(), null)); + l.onResponse(new GetComposableIndexTemplateAction.Response(Map.of())); } else { l.onFailure(e); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java index d31c9fddf2712..5f25903aeaa50 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateResponseTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComponentTemplateTests; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionTests; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.Strings; @@ -45,8 +44,7 @@ protected Writeable.Reader instanceReader() protected GetComponentTemplateAction.Response createTestInstance() { return new GetComponentTemplateAction.Response( randomBoolean() ? Map.of() : randomTemplates(), - RolloverConfigurationTests.randomRolloverConditions(), - DataStreamGlobalRetentionTests.randomGlobalRetention() + RolloverConfigurationTests.randomRolloverConditions() ); } @@ -54,13 +52,11 @@ protected GetComponentTemplateAction.Response createTestInstance() { protected GetComponentTemplateAction.Response mutateInstance(GetComponentTemplateAction.Response instance) { var templates = instance.getComponentTemplates(); var rolloverConditions = instance.getRolloverConfiguration(); - var globalRetention = instance.getGlobalRetention(); - switch (randomInt(2)) { + switch (randomInt(1)) { case 0 -> templates = templates == null ? randomTemplates() : null; case 1 -> rolloverConditions = randomValueOtherThan(rolloverConditions, RolloverConfigurationTests::randomRolloverConditions); - case 2 -> globalRetention = randomValueOtherThan(globalRetention, DataStreamGlobalRetentionTests::randomGlobalRetention); } - return new GetComponentTemplateAction.Response(templates, rolloverConditions, globalRetention); + return new GetComponentTemplateAction.Response(templates, rolloverConditions); } public void testXContentSerializationWithRolloverAndEffectiveRetention() throws IOException { @@ -84,20 +80,15 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws null, false ); - var globalRetention = DataStreamGlobalRetentionTests.randomGlobalRetention(); var rolloverConfiguration = RolloverConfigurationTests.randomRolloverConditions(); - var response = new GetComponentTemplateAction.Response( - Map.of(randomAlphaOfLength(10), template), - rolloverConfiguration, - globalRetention - ); + var response = new GetComponentTemplateAction.Response(Map.of(randomAlphaOfLength(10), template), rolloverConfiguration); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.humanReadable(true); response.toXContent(builder, EMPTY_PARAMS); String serialized = Strings.toString(builder); assertThat(serialized, containsString("rollover")); - for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(globalRetention)) + for (String label : rolloverConfiguration.resolveRolloverConditions(lifecycle.getEffectiveDataRetention(null)) .getConditions() .keySet()) { assertThat(serialized, containsString(label)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateResponseTests.java index aa9989257aa39..c3deabd849998 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateResponseTests.java @@ -8,11 +8,13 @@ package org.elasticsearch.action.admin.indices.template.get; +import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; +import org.elasticsearch.action.admin.indices.rollover.RolloverConfigurationTests; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplateTests; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionTests; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.HashMap; @@ -26,19 +28,41 @@ protected Writeable.Reader instanceRe @Override protected GetComposableIndexTemplateAction.Response createTestInstance() { - DataStreamGlobalRetention globalRetention = randomBoolean() ? null : DataStreamGlobalRetentionTests.randomGlobalRetention(); + RolloverConfiguration rolloverConfiguration = randomBoolean() ? null : RolloverConfigurationTests.randomRolloverConditions(); if (randomBoolean()) { - return new GetComposableIndexTemplateAction.Response(Map.of(), globalRetention); + return new GetComposableIndexTemplateAction.Response(Map.of(), rolloverConfiguration); } Map templates = new HashMap<>(); for (int i = 0; i < randomIntBetween(1, 4); i++) { templates.put(randomAlphaOfLength(4), ComposableIndexTemplateTests.randomInstance()); } - return new GetComposableIndexTemplateAction.Response(templates, globalRetention); + return new GetComposableIndexTemplateAction.Response(templates, rolloverConfiguration); } @Override protected GetComposableIndexTemplateAction.Response mutateInstance(GetComposableIndexTemplateAction.Response instance) { - return randomValueOtherThan(instance, this::createTestInstance); + var rolloverConfiguration = instance.getRolloverConfiguration(); + var templates = instance.indexTemplates(); + switch (randomInt(1)) { + case 0 -> rolloverConfiguration = randomBoolean() || rolloverConfiguration == null + ? randomValueOtherThan(rolloverConfiguration, RolloverConfigurationTests::randomRolloverConditions) + : null; + case 1 -> { + var updatedTemplates = new HashMap(); + for (String name : templates.keySet()) { + if (randomBoolean()) { + updatedTemplates.put(name, templates.get(name)); + } + } + updatedTemplates.put(randomAlphaOfLength(4), ComposableIndexTemplateTests.randomInstance()); + templates = updatedTemplates; + } + } + return new GetComposableIndexTemplateAction.Response(templates, rolloverConfiguration); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(IndicesModule.getNamedWriteables()); } } From 06d09fc7aa3248595f4068fb68259f50f1754bac Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 19 Aug 2024 11:01:06 +0200 Subject: [PATCH 069/389] Add generated evaluators for DateNanos conversion functions (#111961) These are generated files, resulting from [this PR](https://github.com/elastic/elasticsearch/pull/111850). 
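For readers unfamiliar with the generated evaluator shape: each class unwraps the incoming `Block`, takes a fast path when the input is a constant vector, and otherwise converts position by position, turning any `IllegalArgumentException` from the conversion into a registered warning and a null output entry. The per-value conversion itself (`ToDatetime.fromDatenanos` / `ToString.fromDateNanos`) lives in the hand-written function classes and is not part of this diff; below is a minimal sketch of the datetime case, assuming a `date_nanos` value is a non-negative count of nanoseconds since the epoch and a `datetime` value is the corresponding millisecond count. The class name, helper name, and error message are illustrative, not the real ones.

```java
// Hypothetical stand-in for the conversion the generated evaluator calls.
public final class DateNanosConversionSketch {
    private DateNanosConversionSketch() {}

    // Truncates a nanosecond-resolution timestamp to millisecond resolution.
    public static long nanosToMillis(long nanos) {
        if (nanos < 0) {
            // Throwing IllegalArgumentException mirrors how the generated code
            // expects failures to surface: the evaluator catches it, registers
            // a warning, and appends null at that position.
            throw new IllegalArgumentException("nanoseconds before the epoch: " + nanos);
        }
        return nanos / 1_000_000; // 1 ms == 1,000,000 ns
    }

    public static void main(String[] args) {
        long nanos = 1_724_025_600_123_456_789L; // 2024-08-19T00:00:00.123456789Z
        System.out.println(nanosToMillis(nanos)); // prints 1724025600123
    }
}
```

Note the asymmetry visible in the two generated files: the datetime evaluator wraps both the vector and block paths in try/catch because the conversion can fail, while the string evaluator has no catch blocks at all, since rendering a long as a string cannot throw.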
--- .../ToDatetimeFromDateNanosEvaluator.java | 122 ++++++++++++++++++ .../ToStringFromDateNanosEvaluator.java | 109 ++++++++++++++++ 2 files changed, 231 insertions(+) create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromDateNanosEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDateNanosEvaluator.java diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromDateNanosEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromDateNanosEvaluator.java new file mode 100644 index 0000000000000..92b629657b95b --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromDateNanosEvaluator.java @@ -0,0 +1,122 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToDatetime}. + * This class is generated. Do not edit it. 
+ */ +public final class ToDatetimeFromDateNanosEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToDatetimeFromDateNanosEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToDatetimeFromDateNanos"; + } + + @Override + public Block evalVector(Vector v) { + LongVector vector = (LongVector) v; + int positionCount = v.getPositionCount(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendLong(evalValue(vector, p)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static long evalValue(LongVector container, int index) { + long value = container.getLong(index); + return ToDatetime.fromDatenanos(value); + } + + @Override + public Block evalBlock(Block b) { + LongBlock block = (LongBlock) b; + int positionCount = block.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendLong(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static long evalValue(LongBlock container, int index) { + long value = container.getLong(index); + return ToDatetime.fromDatenanos(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToDatetimeFromDateNanosEvaluator get(DriverContext context) { + return new ToDatetimeFromDateNanosEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToDatetimeFromDateNanosEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDateNanosEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDateNanosEvaluator.java new file mode 100644 index 0000000000000..37f13ea340a26 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDateNanosEvaluator.java @@ -0,0 +1,109 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToString}. + * This class is generated. Do not edit it. + */ +public final class ToStringFromDateNanosEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToStringFromDateNanosEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToStringFromDateNanos"; + } + + @Override + public Block evalVector(Vector v) { + LongVector vector = (LongVector) v; + int positionCount = v.getPositionCount(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendBytesRef(evalValue(vector, p)); + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongVector container, int index) { + long value = container.getLong(index); + return ToString.fromDateNanos(value); + } + + @Override + public Block evalBlock(Block b) { + LongBlock block = (LongBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongBlock container, int index) { + long value = container.getLong(index); + return ToString.fromDateNanos(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToStringFromDateNanosEvaluator get(DriverContext context) { + return new ToStringFromDateNanosEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return 
"ToStringFromDateNanosEvaluator[field=" + field + "]"; + } + } +} From d2e667004596544bbbb594c7d42e215483caf9c5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 11:57:07 +0100 Subject: [PATCH 070/389] Move repo analyzer to its own package (#111963) In preparation for adding more things to the blobstore testkit, this commit moves the repository analyzer implementation from `o.e.r.blobstore.testkit` to `o.e.r.blobstore.testkit.analyze`. --- .../AzureRepositoryAnalysisRestIT.java} | 4 ++-- .../GCSRepositoryAnalysisRestIT.java} | 4 ++-- .../AbstractHdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../HdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../SecureHdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../MinioRepositoryAnalysisRestIT.java} | 4 ++-- x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle | 2 +- .../FsRepositoryAnalysisRestIT.java} | 5 ++--- .../S3RepositoryAnalysisRestIT.java} | 4 ++-- .../blobstore/testkit/{ => analyze}/BytesRegister.java | 2 +- .../{ => analyze}/RepositoryAnalysisFailureIT.java | 7 ++++--- .../{ => analyze}/RepositoryAnalysisSuccessIT.java | 7 ++++--- .../blobstore/testkit/SnapshotRepositoryTestKit.java | 5 ++++- .../testkit/{ => analyze}/BlobAnalyzeAction.java | 2 +- .../testkit/{ => analyze}/BlobWriteAbortedException.java | 2 +- .../{ => analyze}/ContendedRegisterAnalyzeAction.java | 2 +- .../testkit/{ => analyze}/GetBlobChecksumAction.java | 2 +- .../testkit/{ => analyze}/RandomBlobContent.java | 2 +- .../{ => analyze}/RandomBlobContentBytesReference.java | 2 +- .../testkit/{ => analyze}/RandomBlobContentStream.java | 2 +- .../testkit/{ => analyze}/RepositoryAnalyzeAction.java | 8 ++++---- .../{ => analyze}/RepositoryPerformanceSummary.java | 2 +- .../{ => analyze}/RestRepositoryAnalyzeAction.java | 2 +- .../{ => analyze}/UncontendedRegisterAnalyzeAction.java | 6 +++--- .../AbstractRepositoryAnalysisRestTestCase.java} | 4 ++-- .../RandomBlobContentBytesReferenceTests.java | 4 ++-- .../{ => analyze}/RandomBlobContentStreamTests.java | 4 ++-- .../{ => analyze}/RepositoryAnalyzeActionTests.java | 2 +- 28 files changed, 53 insertions(+), 49 deletions(-) rename x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{AzureSnapshotRepoTestKitIT.java => analyze/AzureRepositoryAnalysisRestIT.java} (97%) rename x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{GCSSnapshotRepoTestKitIT.java => analyze/GCSRepositoryAnalysisRestIT.java} (95%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{AbstractHdfsSnapshotRepoTestKitIT.java => analyze/AbstractHdfsRepositoryAnalysisRestIT.java} (86%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{HdfsSnapshotRepoTestKitIT.java => analyze/HdfsRepositoryAnalysisRestIT.java} (90%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{SecureHdfsSnapshotRepoTestKitIT.java => analyze/SecureHdfsRepositoryAnalysisRestIT.java} (93%) rename x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{MinioSnapshotRepoTestKitIT.java => analyze/MinioRepositoryAnalysisRestIT.java} (93%) rename 
x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{rest/FsSnapshotRepoTestKitIT.java => analyze/FsRepositoryAnalysisRestIT.java} (71%) rename x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{S3SnapshotRepoTestKitIT.java => analyze/S3RepositoryAnalysisRestIT.java} (94%) rename x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BytesRegister.java (93%) rename x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalysisFailureIT.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalysisSuccessIT.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BlobAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BlobWriteAbortedException.java (85%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/ContendedRegisterAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/GetBlobChecksumAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContent.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentBytesReference.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentStream.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryPerformanceSummary.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RestRepositoryAnalyzeAction.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/UncontendedRegisterAnalyzeAction.java (96%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{AbstractSnapshotRepoTestKitRestTestCase.java => analyze/AbstractRepositoryAnalysisRestTestCase.java} (90%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentBytesReferenceTests.java (91%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentStreamTests.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalyzeActionTests.java (98%) diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java 
b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index 154b5bec54418..ecc8401e1d79a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.azure.AzureHttpFixture; @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class AzureSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false; // TODO when https://github.com/elastic/elasticsearch/issues/111532 addressed, use a HTTPS fixture in FIPS mode too diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java similarity index 95% rename from x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java index 95b6f4aed5221..7f7540d138825 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.gcs.GoogleCloudStorageHttpFixture; import fixture.gcs.TestUtils; @@ -23,7 +23,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class GCSSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class GCSRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.google.fixture", "true")); private static GoogleCloudStorageHttpFixture fixture = new GoogleCloudStorageHttpFixture(USE_FIXTURE, "bucket", "o/oauth2/token"); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java similarity index 86% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java index 2810c4801e8dd..2aec22476d6cc 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java @@ -5,14 +5,14 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.settings.Settings; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public abstract class AbstractHdfsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public abstract class AbstractHdfsRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { @Override protected String repositoryType() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java similarity index 90% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java index e9787ecdce854..d60497949ff61 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -17,7 +17,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) -public class HdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { +public class HdfsRepositoryAnalysisRestIT extends AbstractHdfsRepositoryAnalysisRestIT { public static HdfsFixture hdfsFixture = new HdfsFixture(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java index 6d599e41e3b9f..dd388c0a79776 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -22,7 +22,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) -public class SecureHdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { +public class SecureHdfsRepositoryAnalysisRestIT extends AbstractHdfsRepositoryAnalysisRestIT { public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java index 3e58a8d89ff31..b0068bd7bfdaf 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -18,7 +18,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) -public class MinioSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class MinioRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { public static final MinioTestContainer minioFixture = new MinioTestContainer(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle index 17df249b08cf6..8a5dbca7dd0b2 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle @@ -21,7 +21,7 @@ testClusters.matching { it.name == "yamlRestTest" }.configureEach { } tasks.named('yamlRestTestTestingConventions').configure { - baseClass 'org.elasticsearch.repositories.blobstore.testkit.AbstractSnapshotRepoTestKitRestTestCase' + baseClass 'org.elasticsearch.repositories.blobstore.testkit.analyze.AbstractRepositoryAnalysisRestTestCase' baseClass 'org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase' } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java similarity index 71% rename from x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java index 77dfb3902805a..7151b6e80a4d5 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java @@ -5,13 +5,12 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit.rest; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.repositories.blobstore.testkit.AbstractSnapshotRepoTestKitRestTestCase; import org.elasticsearch.repositories.fs.FsRepository; -public class FsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class FsRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { @Override protected String repositoryType() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java similarity index 94% rename from x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java index c38bd1204189f..8986cf1059191 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.s3.S3HttpFixture; @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class S3SnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class S3RepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java rename to x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java index 4303fff673359..3f5e406ac797b 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java rename to x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java index 73a90f247810e..e61f883abd60f 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -40,6 +40,7 @@ import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; @@ -66,8 +67,8 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.anyOf; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java rename to 
x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java index e4d9bf9041b4a..bb452ad2a64ce 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.RepositoryMetadata; @@ -36,6 +36,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; @@ -61,8 +62,8 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; -import static org.elasticsearch.repositories.blobstore.testkit.RepositoryAnalysisFailureIT.isContendedRegisterKey; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RepositoryAnalysisFailureIT.isContendedRegisterKey; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java index 124174a2a025b..04d59906e6db3 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java @@ -20,6 +20,8 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.testkit.analyze.RepositoryAnalyzeAction; +import org.elasticsearch.repositories.blobstore.testkit.analyze.RestRepositoryAnalyzeAction; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.xcontent.XContentBuilder; @@ -51,7 +53,8 @@ public List getRestHandlers( return List.of(new RestRepositoryAnalyzeAction()); } - static void humanReadableNanos(XContentBuilder builder, String rawFieldName, String readableFieldName, long nanos) throws IOException { + public static 
void humanReadableNanos(XContentBuilder builder, String rawFieldName, String readableFieldName, long nanos) + throws IOException { assert rawFieldName.equals(readableFieldName) == false : rawFieldName + " vs " + readableFieldName; if (builder.humanReadable()) { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java index aa0cf3e3cfc1b..6007968d7cb4d 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java similarity index 85% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java index 11c73993a3e6e..8a7bbb7255c5a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; public class BlobWriteAbortedException extends RuntimeException { public BlobWriteAbortedException() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java index 40cb4a45a0339..f527a46371641 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java index f706ff79bf073..816f9e860a33a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java index aa9125f214f58..d5061b303f93d 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.repositories.RepositoryVerificationException; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java index 44627000a2de9..eee40992cb0d7 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java index c6163a7ffd82d..15fa370c5fe0a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import java.io.InputStream; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java index 30c2d0a89e0ee..5ced0176a4f81 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -83,10 +83,10 @@ import java.util.stream.IntStream; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.repositories.blobstore.testkit.BlobAnalyzeAction.MAX_ATOMIC_WRITE_SIZE; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; import static org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit.humanReadableNanos; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.BlobAnalyzeAction.MAX_ATOMIC_WRITE_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; /** * Action which distributes a bunch of {@link BlobAnalyzeAction}s over the nodes in the cluster, with limited concurrency, and collects diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java index 3ee8805480023..c2625285a8912 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java index 2a549db8b3255..b0f6b01936ffa 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java similarity index 96% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java index 1986b47e3188c..23c25e466b917 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -38,8 +38,8 @@ import java.io.IOException; import java.util.Map; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; class UncontendedRegisterAnalyzeAction extends HandledTransportAction { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java similarity index 90% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java index 3af8c118803a7..2c96003f7e3d3 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java @@ -5,14 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; -public abstract class AbstractSnapshotRepoTestKitRestTestCase extends ESRestTestCase { +public abstract class AbstractRepositoryAnalysisRestTestCase extends ESRestTestCase { protected abstract String repositoryType(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java similarity index 91% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java index c85b634083faf..29a6253c031d8 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.test.ESTestCase; @@ -13,7 +13,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.CRC32; -import static org.elasticsearch.repositories.blobstore.testkit.RandomBlobContent.BUFFER_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RandomBlobContent.BUFFER_SIZE; import static org.hamcrest.Matchers.equalTo; public class RandomBlobContentBytesReferenceTests extends ESTestCase { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java index 6c353e0937a33..1854d98f7ec79 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.test.ESTestCase; @@ -14,7 +14,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.CRC32; -import static org.elasticsearch.repositories.blobstore.testkit.RandomBlobContent.BUFFER_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RandomBlobContent.BUFFER_SIZE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java index 0d1bdc86002b4..44770e68d714b 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; From ca6d41ce2093989dad829ddfe053a1194e0d0b7a Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 12:04:55 +0100 Subject: [PATCH 071/389] Fail `indexDocs()` on rejection (#111962) In 9dc59e29 we relaxed the `indexDocs()` test utility to retry on rejections caused by exceeding the write queue length limit, but then we massively relaxed this limit in #59559. We should not be seeing such rejections any more, so we can revert this special handling and strengthen the tests to assert that the indexing process encounters no failures at all. 
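A minimal sketch of the fail-fast pattern the hunk below adopts, assuming the Elasticsearch test-framework classpath; `failFastListener` is an illustrative helper mirroring the `delegateResponse((l, e) -> fail(e))` call in the patch, not a method the patch adds:

[source,java]
----
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;

import java.util.concurrent.CountDownLatch;

class FailFastIndexing {
    // Counts the latch down on success; any failure (including a write-queue
    // rejection) now aborts the test immediately instead of being collected
    // for a re-indexing pass.
    static <T> ActionListener<T> failFastListener(CountDownLatch latch) {
        return new LatchedActionListener<T>(ActionListener.noop(), latch)
            .delegateResponse((l, e) -> { throw new AssertionError("unexpected indexing failure", e); });
    }
}
----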
--- .../elasticsearch/test/ESIntegTestCase.java | 38 +------------------ 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index aad3dcc457241..fa686a0bc753a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -101,7 +100,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; @@ -109,7 +107,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.gateway.PersistedClusterStateService; @@ -186,7 +183,6 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; @@ -212,7 +208,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -1735,7 +1730,6 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } Collections.shuffle(builders, random()); - final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); List inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. 
final String[] indicesArray = indices.toArray(new String[] {}); @@ -1744,7 +1738,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false); for (IndexRequestBuilder indexRequestBuilder : builders) { indexRequestBuilder.execute( - new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) + new LatchedActionListener(newLatch(inFlightAsyncOperations)).delegateResponse((l, e) -> fail(e)) ); postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush); } @@ -1771,19 +1765,8 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } for (CountDownLatch operation : inFlightAsyncOperations) { - operation.await(); - } - final List actualErrors = new ArrayList<>(); - for (Tuple tuple : errors) { - Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); - if (t instanceof EsRejectedExecutionException) { - logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); - tuple.v1().get(); // re-index if rejected - } else { - actualErrors.add(tuple.v2()); - } + safeAwait(operation); } - assertThat(actualErrors, emptyIterable()); if (bogusIds.isEmpty() == false) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! for (List doc : bogusIds) { @@ -1957,23 +1940,6 @@ protected void addError(Exception e) {} } - private class PayloadLatchedActionListener extends LatchedActionListener { - private final CopyOnWriteArrayList> errors; - private final T builder; - - PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList> errors) { - super(latch); - this.errors = errors; - this.builder = builder; - } - - @Override - protected void addError(Exception e) { - errors.add(new Tuple<>(builder, e)); - } - - } - /** * Clears the given scroll Ids */ From 7bf730a88f5b1e324d3afbd077cc400ab092de92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 19 Aug 2024 15:55:58 +0200 Subject: [PATCH 072/389] Fix: HierarchyCircuitBreakerTelemetryTests testCircuitBreakerTripCountMetric failing (#111831) * Cleanup code and teardown for testCircuitBreakerTripCountMetric * Move to a more appropriate location --- muted-tests.yml | 3 - .../HierarchyCircuitBreakerTelemetryIT.java} | 101 +++++------------- 2 files changed, 24 insertions(+), 80 deletions(-) rename server/src/{test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java => internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java} (58%) diff --git a/muted-tests.yml b/muted-tests.yml index 22adc4a8c44b5..dd4dd2c7f2ec7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -137,9 +137,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.indices.breaker.HierarchyCircuitBreakerTelemetryTests - method: testCircuitBreakerTripCountMetric - issue: https://github.com/elastic/elasticsearch/issues/111778 - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {comparison.RangeVersion SYNC} issue: https://github.com/elastic/elasticsearch/issues/111814 diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java similarity index 58% rename from server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java rename to server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java index 2cbe1202520df..ff2117ea93bb9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java @@ -6,25 +6,23 @@ * Side Public License, v 1. */ -package org.elasticsearch.indices.breaker; +package org.elasticsearch.indices.memory.breaker; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.RecordingInstruments; -import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; +import org.junit.After; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -41,54 +39,11 @@ import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = true) -public class HierarchyCircuitBreakerTelemetryTests extends ESIntegTestCase { +public class HierarchyCircuitBreakerTelemetryIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return List.of(TestCircuitBreakerTelemetryPlugin.class); - } - - public static class TestCircuitBreakerTelemetryPlugin extends TestTelemetryPlugin { - protected final MeterRegistry meter = new RecordingMeterRegistry() { - private final LongCounter tripCount = new RecordingInstruments.RecordingLongCounter( - CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL, - recorder - ) { - @Override - public void incrementBy(long inc) { - throw new UnsupportedOperationException(); - } - - @Override - public void incrementBy(long inc, Map attributes) { - throw new UnsupportedOperationException(); - } - }; - - @Override - protected LongCounter buildLongCounter(String name, String description, String unit) { - if (name.equals(tripCount.getName())) { - return tripCount; - } - throw new IllegalArgumentException("Unknown counter metric name [" + name + "]"); - } - - @Override - public LongCounter registerLongCounter(String name, String description, String unit) { - assertCircuitBreakerName(name); - return super.registerLongCounter(name, description, unit); - } - - @Override - public LongCounter getLongCounter(String name) { - assertCircuitBreakerName(name); - return super.getLongCounter(name); - } - - private void assertCircuitBreakerName(final String 
name) { - assertThat(name, Matchers.oneOf(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL)); - } - }; + return List.of(TestTelemetryPlugin.class); } public void testCircuitBreakerTripCountMetric() { @@ -142,37 +97,29 @@ public void testCircuitBreakerTripCountMetric() { fail("Expected exception not thrown"); } - private List getMeasurements(String dataNodeName) { - final TestTelemetryPlugin dataNodeTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) - .filterPlugins(TestCircuitBreakerTelemetryPlugin.class) + @After + public void resetClusterSetting() { + final var circuitBreakerSettings = Settings.builder() + .putNull(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey()); + updateClusterSettings(circuitBreakerSettings); + } + + private List getMeasurements(String nodeName) { + final TestTelemetryPlugin telemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) .toList() .get(0); return Measurement.combine( - Stream.of(dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) + Stream.of(telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) .flatMap(Function.identity()) .toList() ); } - - // Make sure circuit breaker telemetry on trip count reports the same values as circuit breaker stats - private void assertCircuitBreakerTripCount( - final HierarchyCircuitBreakerService circuitBreakerService, - final String circuitBreakerName, - int firstBytesEstimate, - int secondBytesEstimate, - long expectedTripCountValue - ) { - try { - circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(firstBytesEstimate, randomAlphaOfLength(5)); - circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(secondBytesEstimate, randomAlphaOfLength(5)); - } catch (final CircuitBreakingException cbex) { - final CircuitBreakerStats circuitBreakerStats = Arrays.stream(circuitBreakerService.stats().getAllStats()) - .filter(stats -> circuitBreakerName.equals(stats.getName())) - .findAny() - .get(); - assertThat(circuitBreakerService.getBreaker(circuitBreakerName).getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - assertThat(circuitBreakerStats.getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - } - } - } From 8b0a1aa7ebac47af865f4fbc732cc4a09835906a Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Mon, 19 Aug 2024 16:14:09 +0200 Subject: [PATCH 073/389] [cache] Support async RangeMissingHandler callbacks (#111340) (#111896) Change `fillCacheRange` method to accept a completion listener that must be called by `RangeMissingHandler` implementations when they finish fetching data. By doing so, we support asynchronously fetching the data from a third party storage. We also support asynchronous `SourceInputStreamFactory` for reading gaps from the storage. 
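A minimal sketch of a handler under the new contract described above (the `fetchExecutor` parameter and the `AsyncGapFiller` wrapper are assumptions for illustration, not part of this patch). Because completion is now signalled through the listener rather than by returning from the method, an implementation may hand the copy off to another thread pool and return immediately:

[source,java]
----
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.blobcache.shared.SharedBlobCacheService;

import java.util.concurrent.Executor;

class AsyncGapFiller {
    static SharedBlobCacheService.RangeMissingHandler asyncHandler(Executor fetchExecutor) {
        return (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) ->
            // Returning from fillCacheRange no longer signals completion; the listener does.
            fetchExecutor.execute(ActionRunnable.run(completionListener, () -> {
                // ... copy `length` bytes from the remote source into `channel` at channelPos ...
                progressUpdater.accept(length); // report progress as bytes land in the cache file
            })); // ActionRunnable completes the listener on success and fails it on any exception
    }
}
----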
--- .../shared/SharedBlobCacheService.java | 101 +++++--- .../shared/SharedBlobCacheServiceTests.java | 216 ++++++++++++------ .../store/input/FrozenIndexInput.java | 59 ++--- 3 files changed, 253 insertions(+), 123 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 3242a02dff525..8ca62a3b95023 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -650,13 +650,14 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, int wri // no need to allocate a new capturing lambda if the offset isn't adjusted return writer; } - return (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> writer.fillCacheRange( + return (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> writer.fillCacheRange( channel, channelPos, streamFactory, relativePos - writeOffset, len, - progressUpdater + progressUpdater, + completionListener ); } @@ -991,16 +992,17 @@ void populateAndRead( executor.execute(fillGapRunnable(gap, writer, null, refs.acquireListener())); } } else { - final List gapFillingTasks = gaps.stream() - .map(gap -> fillGapRunnable(gap, writer, streamFactory, refs.acquireListener())) - .toList(); - executor.execute(() -> { - try (streamFactory) { + var gapFillingListener = refs.acquireListener(); + try (var gfRefs = new RefCountingRunnable(ActionRunnable.run(gapFillingListener, streamFactory::close))) { + final List gapFillingTasks = gaps.stream() + .map(gap -> fillGapRunnable(gap, writer, streamFactory, gfRefs.acquireListener())) + .toList(); + executor.execute(() -> { // Fill the gaps in order. If a gap fails to fill for whatever reason, the task for filling the next // gap will still be executed. 
gapFillingTasks.forEach(Runnable::run); - } - }); + }); + } } } } @@ -1009,13 +1011,13 @@ void populateAndRead( } } - private AbstractRunnable fillGapRunnable( + private Runnable fillGapRunnable( SparseFileTracker.Gap gap, RangeMissingHandler writer, @Nullable SourceInputStreamFactory streamFactory, ActionListener listener ) { - return ActionRunnable.run(listener.delegateResponse((l, e) -> failGapAndListener(gap, l, e)), () -> { + return () -> ActionListener.run(listener, l -> { var ioRef = io; assert regionOwners.get(ioRef) == CacheFileRegion.this; assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; @@ -1026,10 +1028,15 @@ private AbstractRunnable fillGapRunnable( streamFactory, start, Math.toIntExact(gap.end() - start), - progress -> gap.onProgress(start + progress) + progress -> gap.onProgress(start + progress), + l.map(unused -> { + assert regionOwners.get(ioRef) == CacheFileRegion.this; + assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; + writeCount.increment(); + gap.onCompletion(); + return null; + }).delegateResponse((delegate, e) -> failGapAndListener(gap, delegate, e)) ); - writeCount.increment(); - gap.onCompletion(); }); } @@ -1117,12 +1124,23 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - writer.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); - var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); + writer.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + length, + progressUpdater, + completionListener.map(unused -> { + var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); + blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); + blobCacheMetrics.getCacheMissCounter().increment(); + return null; + }) + ); } }; if (rangeToRead.isEmpty()) { @@ -1215,9 +1233,18 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos - writeOffset, len, progressUpdater); + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos - writeOffset, + len, + progressUpdater, + completionListener + ); } }; } @@ -1230,14 +1257,25 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { assert assertValidRegionAndLength(fileRegion, channelPos, len); - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, len, progressUpdater); - assert regionOwners.get(fileRegion.io) == fileRegion - : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + len, + progressUpdater, + Assertions.ENABLED ? 
ActionListener.runBefore(completionListener, () -> { + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + }) : completionListener + ); } }; + } return adjustedWriter; } @@ -1324,6 +1362,7 @@ default SourceInputStreamFactory sharedInputStreamFactory(List completionListener ) throws IOException; } @@ -1343,9 +1383,9 @@ public interface SourceInputStreamFactory extends Releasable { /** * Create the input stream at the specified position. * @param relativePos the relative position in the remote storage to read from. - * @return the input stream ready to be read from. + * @param listener listener for the input stream ready to be read from. */ - InputStream create(int relativePos) throws IOException; + void create(int relativePos, ActionListener listener) throws IOException; } private abstract static class DelegatingRangeMissingHandler implements RangeMissingHandler { @@ -1367,9 +1407,10 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index e477673c90d6d..6c49b50c06e82 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.StoppableExecutorServiceWrapper; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; @@ -72,6 +73,13 @@ private static long size(long numPages) { return numPages * SharedBytes.PAGE_SIZE; } + private static void completeWith(ActionListener listener, CheckedRunnable runnable) { + ActionListener.completeWith(listener, () -> { + runnable.run(); + return null; + }); + } + public void testBasicEviction() throws IOException { Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") @@ -115,7 +123,10 @@ public void testBasicEviction() throws IOException { ByteRange.of(0L, 1L), ByteRange.of(0L, 1L), (channel, channelPos, relativePos, length) -> 1, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), bytesReadFuture ); @@ -552,11 +563,14 @@ public void execute(Runnable command) { cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(-length); - 
progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(-length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -570,9 +584,15 @@ public void execute(Runnable command) { // a download that would use up all regions should not run final var cacheKey = generateCacheKey(); assertEquals(2, cacheService.freeRegionCount()); - var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, streamFactory, relPos, len, update) -> { - throw new AssertionError("Should never reach here"); - }, bulkExecutor, ActionListener.noop()); + var configured = cacheService.maybeFetchFullEntry( + cacheKey, + size(500), + (ch, chPos, streamFactory, relPos, len, update, completionListener) -> completeWith(completionListener, () -> { + throw new AssertionError("Should never reach here"); + }), + bulkExecutor, + ActionListener.noop() + ); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } @@ -613,9 +633,14 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { (ActionListener listener) -> cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept( - length - ), + ( + channel, + channelPos, + streamFactory, + relativePos, + length, + progressUpdater, + completionListener) -> completeWith(completionListener, () -> progressUpdater.accept(length)), bulkExecutor, listener ) @@ -859,7 +884,10 @@ public void testMaybeEvictLeastUsed() throws Exception { var entry = cacheService.get(cacheKey, regionSize, 0); entry.populate( ByteRange.of(0L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), ActionListener.noop() ); @@ -954,11 +982,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -985,11 +1016,14 @@ public void execute(Runnable command) { cacheKey, region, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, listener ); @@ -1010,9 +1044,12 @@ public void execute(Runnable command) { cacheKey, randomIntBetween(0, 10), randomLongBetween(1L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - 
throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1032,11 +1069,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, ignore, relativePos, length, progressUpdater) -> { - assert ignore == null : ignore; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert ignore == null : ignore; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -1110,12 +1150,15 @@ public void execute(Runnable command) { region, range, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); - assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); - assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); - bytesCopied.addAndGet(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); + assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); + assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); + bytesCopied.addAndGet(length); + } + ), bulkExecutor, future ); @@ -1150,7 +1193,10 @@ public void execute(Runnable command) { region, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, listener ); @@ -1173,9 +1219,12 @@ public void execute(Runnable command) { randomIntBetween(0, 10), ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1196,7 +1245,10 @@ public void execute(Runnable command) { 0, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, future ); @@ -1237,10 +1289,18 @@ public void testPopulate() throws Exception { var entry = cacheService.get(cacheKey, blobLength, 0); AtomicLong bytesWritten = new AtomicLong(0L); final PlainActionFuture future1 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, 
taskQueue.getThreadPool().generic(), future1); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future1 + ); assertThat(future1.isDone(), is(false)); assertThat(taskQueue.hasRunnableTasks(), is(true)); @@ -1248,18 +1308,34 @@ public void testPopulate() throws Exception { // start populating the second region entry = cacheService.get(cacheKey, blobLength, 1); final PlainActionFuture future2 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future2); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future2 + ); // start populating again the first region, listener should be called immediately entry = cacheService.get(cacheKey, blobLength, 0); final PlainActionFuture future3 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future3); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future3 + ); assertThat(future3.isDone(), is(true)); var written = future3.get(10L, TimeUnit.SECONDS); @@ -1377,7 +1453,10 @@ public void testSharedSourceInputStreamFactory() throws Exception { range, range, (channel, channelPos, relativePos, length) -> length, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), EsExecutors.DIRECT_EXECUTOR_SERVICE, future ); @@ -1394,8 +1473,8 @@ public void testSharedSourceInputStreamFactory() throws Exception { final var factoryClosed = new AtomicBoolean(false); final var dummyStreamFactory = new SourceInputStreamFactory() { @Override - public InputStream create(int relativePos) { - return null; + public void create(int relativePos, ActionListener listener) { + listener.onResponse(null); } @Override @@ -1420,17 +1499,20 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completion ) throws IOException { - if (invocationCounter.incrementAndGet() == 1) { - final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); - assertThat(witness, nullValue()); - } else { - assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); - } - assertThat(streamFactory, 
sameInstance(dummyStreamFactory)); - assertThat(position.getAndSet(relativePos), lessThan(relativePos)); - progressUpdater.accept(length); + completeWith(completion, () -> { + if (invocationCounter.incrementAndGet() == 1) { + final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); + assertThat(witness, nullValue()); + } else { + assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); + } + assertThat(streamFactory, sameInstance(dummyStreamFactory)); + assertThat(position.getAndSet(relativePos), lessThan(relativePos)); + progressUpdater.accept(length); + }); } }; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index 56efc72f2f6f7..d7cf22a05981f 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteBufferReference; import org.elasticsearch.blobcache.common.ByteRange; @@ -146,32 +147,38 @@ private void readWithoutBlobCacheSlow(ByteBuffer b, long position, int length) t final int read = SharedBytes.readCacheFile(channel, pos, relativePos, len, byteBufferReference); stats.addCachedBytesRead(read); return read; - }, (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> { - assert streamFactory == null : streamFactory; - final long startTimeNanos = stats.currentTimeNanos(); - try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { - assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - logger.trace( - "{}: writing channel {} pos {} length {} (details: {})", - fileInfo.physicalName(), - channelPos, - relativePos, - len, - cacheFile - ); - SharedBytes.copyToCacheFileAligned( - channel, - input, - channelPos, - relativePos, - len, - progressUpdater, - writeBuffer.get().clear() - ); - final long endTimeNanos = stats.currentTimeNanos(); - stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); - } - }); + }, + (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> ActionListener.completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + final long startTimeNanos = stats.currentTimeNanos(); + try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { + assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); + logger.trace( + "{}: writing channel {} pos {} length {} (details: {})", + fileInfo.physicalName(), + channelPos, + relativePos, + len, + cacheFile + ); + SharedBytes.copyToCacheFileAligned( + channel, + input, + channelPos, + relativePos, + len, + progressUpdater, + writeBuffer.get().clear() + ); + final long endTimeNanos = stats.currentTimeNanos(); + stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); + return null; + } + } + ) + ); assert 
bytesRead == length : bytesRead + " vs " + length; byteBufferReference.finish(bytesRead); } finally { From cf034c03df532ef353ff5d09f3cccbf109af53d6 Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Mon, 19 Aug 2024 10:55:34 -0400 Subject: [PATCH 074/389] Add a new random rerank retriever (#111851) * Add a new random rerank retriever, that reranks results in random order without requiring inference * Update docs/changelog/111851.yaml * PR feedback - remove null checks for field as it can never be null * Update docs * Revert "Update docs" This reverts commit 3d61676e8c9ab76472f824554efd607ddd1c5678. * Remove minScore * Random seed * Delete docs/changelog/111851.yaml * PR feedback * Add optional seed to request, YAML test * PR feedback --- .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/inference/InferenceFeatures.java | 6 +- .../xpack/inference/InferencePlugin.java | 6 +- .../rank/random/RandomRankBuilder.java | 165 ++++++++++++++++++ ...ankFeaturePhaseRankCoordinatorContext.java | 55 ++++++ .../random/RandomRankRetrieverBuilder.java | 124 +++++++++++++ .../rank/random/RandomRankBuilderTests.java | 70 ++++++++ .../RandomRankRetrieverBuilderTests.java | 104 +++++++++++ .../inference/80_random_rerank_retriever.yml | 94 ++++++++++ 9 files changed, 623 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java create mode 100644 x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fd3a3d8672966..1009d9e2ae7d1 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -191,6 +191,7 @@ static TransportVersion def(int id) { public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); + public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 4cc7f5b502ba9..12a32ecdc6d4f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -9,6 +9,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; @@ -20,7 +21,10 @@ public class InferenceFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED); + return Set.of( + TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED, + RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index f6d4a9f774a91..9d85bbf751250 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -63,6 +63,8 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import org.elasticsearch.xpack.inference.registry.ModelRegistry; @@ -243,6 +245,7 @@ public List getInferenceServiceFactories() { public List getNamedWriteables() { var entries = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables()); entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, TextSimilarityRankBuilder.NAME, TextSimilarityRankBuilder::new)); + entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, RandomRankBuilder.NAME, RandomRankBuilder::new)); return entries; } @@ -336,7 +339,8 @@ public List> getQueries() { @Override public List> getRetrievers() { return List.of( - new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent) + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), + new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java new file mode 100644 index 0000000000000..fdb5503e491eb --- /dev/null +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankShardContext; +import org.elasticsearch.search.rank.rerank.RerankingRankFeaturePhaseRankShardContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.FIELD_FIELD; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.SEED_FIELD; + +/** + * A {@code RankBuilder} that performs reranking with random scores, used for testing. + */ +public class RandomRankBuilder extends RankBuilder { + + public static final String NAME = "random_reranker"; + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + Integer rankWindowSize = args[0] == null ? 
DEFAULT_RANK_WINDOW_SIZE : (Integer) args[0]; + String field = (String) args[1]; + Integer seed = (Integer) args[2]; + + return new RandomRankBuilder(rankWindowSize, field, seed); + }); + + static { + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + } + + private final String field; + private final Integer seed; + + public RandomRankBuilder(int rankWindowSize, String field, Integer seed) { + super(rankWindowSize); + + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("field is required"); + } + + this.field = field; + this.seed = seed; + } + + public RandomRankBuilder(StreamInput in) throws IOException { + super(in); + // rankWindowSize deserialization is handled by the parent class RankBuilder + this.field = in.readString(); + this.seed = in.readOptionalInt(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANDOM_RERANKER_RETRIEVER; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + out.writeString(field); + out.writeOptionalInt(seed); + } + + @Override + public void doXContent(XContentBuilder builder, Params params) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + builder.field(FIELD_FIELD.getPreferredName(), field); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public Explanation explainHit(Explanation baseExplanation, RankDoc scoreDoc, List queryNames) { + if (scoreDoc == null) { + return baseExplanation; + } + if (false == baseExplanation.isMatch()) { + return baseExplanation; + } + + assert scoreDoc instanceof RankFeatureDoc : "ScoreDoc is not an instance of RankFeatureDoc"; + RankFeatureDoc rankFeatureDoc = (RankFeatureDoc) scoreDoc; + + return Explanation.match( + rankFeatureDoc.score, + "rank after reranking: [" + rankFeatureDoc.rank + "] using seed [" + seed + "] with score: [" + rankFeatureDoc.score + "]", + baseExplanation + ); + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new RerankingQueryPhaseRankShardContext(queries, rankWindowSize()); + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new RerankingQueryPhaseRankCoordinatorContext(rankWindowSize()); + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RerankingRankFeaturePhaseRankShardContext(field); + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + return new RandomRankFeaturePhaseRankCoordinatorContext(size, from, rankWindowSize(), seed); + } + + public String field() { + return field; + } + + @Override + protected boolean doEquals(RankBuilder other) { + RandomRankBuilder that = (RandomRankBuilder) other; + return Objects.equals(field, that.field) && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, seed); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java new file mode 100644 index 0000000000000..446d8e5862dd2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; + +/** + * A {@code RankFeaturePhaseRankCoordinatorContext} that assigns random (optionally seeded) scores to documents within + * the provided rank window; used for testing. + */ +public class RandomRankFeaturePhaseRankCoordinatorContext extends RankFeaturePhaseRankCoordinatorContext { + + private final Integer seed; + + public RandomRankFeaturePhaseRankCoordinatorContext(int size, int from, int rankWindowSize, Integer seed) { + super(size, from, rankWindowSize); + this.seed = seed; + } + + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener<float[]> scoreListener) { + // Generate a random score per document, derived deterministically from the doc id and the optional seed + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + RankFeatureDoc featureDoc = featureDocs[i]; + int doc = featureDoc.doc; + long docSeed = seed != null ? seed + doc : doc; + scores[i] = new Random(docSeed).nextFloat(); + } + scoreListener.onResponse(scores); + } + + /** + * Sorts documents by score descending. + * @param originalDocs documents to process + */ + @Override + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java new file mode 100644 index 0000000000000..ab8c85cac00e3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A {@code RetrieverBuilder} for parsing and constructing a random reranker retriever. + */ +public class RandomRankRetrieverBuilder extends RetrieverBuilder { + + public static final NodeFeature RANDOM_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature("random_reranker_retriever_supported"); + + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); + public static final ParseField SEED_FIELD = new ParseField("seed"); + + public static final ConstructingObjectParser<RandomRankRetrieverBuilder, RetrieverParserContext> PARSER = + new ConstructingObjectParser<>(RandomRankBuilder.NAME, args -> { + RetrieverBuilder retrieverBuilder = (RetrieverBuilder) args[0]; + String field = (String) args[1]; + int rankWindowSize = args[2] == null ? DEFAULT_RANK_WINDOW_SIZE : (int) args[2]; + Integer seed = (Integer) args[3]; + + return new RandomRankRetrieverBuilder(retrieverBuilder, field, rankWindowSize, seed); + }); + + static { + PARSER.declareNamedObject(constructorArg(), (p, c, n) -> p.namedObject(RetrieverBuilder.class, n, c), RETRIEVER_FIELD); + PARSER.declareString(optionalConstructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + + RetrieverBuilder.declareBaseParserFields(RandomRankBuilder.NAME, PARSER); + } + + public static RandomRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(RANDOM_RERANKER_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + RandomRankBuilder.NAME + "]"); + } + return PARSER.apply(parser, context); + } + + private final RetrieverBuilder retrieverBuilder; + private final String field; + private final int rankWindowSize; + private final Integer seed; + + public RandomRankRetrieverBuilder(RetrieverBuilder retrieverBuilder, String field, int rankWindowSize, Integer seed) { + this.retrieverBuilder = retrieverBuilder; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.seed = seed; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); + + // Combining with other rank builders (such as RRF) is not supported + if (searchSourceBuilder.rankBuilder() != null) { + throw new IllegalArgumentException("random rank builder cannot be combined with 
other rank builders"); + } + + searchSourceBuilder.rankBuilder(new RandomRankBuilder(this.rankWindowSize, this.field, this.seed)); + } + + @Override + public String getName() { + return RandomRankBuilder.NAME; + } + + public int rankWindowSize() { + return rankWindowSize; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName()); + builder.startObject(); + builder.field(retrieverBuilder.getName(), retrieverBuilder); + builder.endObject(); + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(RANK_WINDOW_SIZE_FIELD.getPreferredName(), rankWindowSize); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + protected boolean doEquals(Object other) { + RandomRankRetrieverBuilder that = (RandomRankRetrieverBuilder) other; + return Objects.equals(retrieverBuilder, that.retrieverBuilder) + && Objects.equals(field, that.field) + && Objects.equals(rankWindowSize, that.rankWindowSize) + && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(retrieverBuilder, field, rankWindowSize, seed); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java new file mode 100644 index 0000000000000..c464dbaea47cd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankBuilderTests extends AbstractXContentSerializingTestCase { + + @Override + protected RandomRankBuilder createTestInstance() { + return new RandomRankBuilder(randomIntBetween(1, 1000), "my-field", randomBoolean() ? randomIntBetween(1, 1000) : null); + } + + @Override + protected RandomRankBuilder mutateInstance(RandomRankBuilder instance) throws IOException { + String field = instance.field() + randomAlphaOfLength(2); + int rankWindowSize = randomValueOtherThan(instance.rankWindowSize(), this::randomRankWindowSize); + Integer seed = randomBoolean() ? 
randomIntBetween(1, 1000) : null; + return new RandomRankBuilder(rankWindowSize, field, seed); + } + + @Override + protected Writeable.Reader instanceReader() { + return RandomRankBuilder::new; + } + + @Override + protected RandomRankBuilder doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.START_OBJECT); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.FIELD_NAME); + assertEquals(parser.currentName(), RandomRankBuilder.NAME); + RandomRankBuilder builder = RandomRankBuilder.PARSER.parse(parser, null); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.END_OBJECT); + parser.nextToken(); + assertNull(parser.currentToken()); + return builder; + } + + private int randomRankWindowSize() { + return randomIntBetween(0, 1000); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "field": "my-field" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + RandomRankBuilder parsed = RandomRankBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java new file mode 100644 index 0000000000000..c33f30d461350 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankRetrieverBuilderTests extends AbstractXContentTestCase { + + /** + * Creates a random {@link RandomRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static RandomRankRetrieverBuilder createRandomRankRetrieverBuilder() { + return new RandomRankRetrieverBuilder( + TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + randomAlphaOfLength(10), + randomIntBetween(1, 10000), + randomBoolean() ? 
randomIntBetween(1, 1000) : null + ); + } + + @Override + protected RandomRankRetrieverBuilder createTestInstance() { + return createRandomRankRetrieverBuilder(); + } + + @Override + protected RandomRankRetrieverBuilder doParseInstance(XContentParser parser) { + return RandomRankRetrieverBuilder.PARSER.apply( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(TextSimilarityRankBuilder.NAME), + (p, c) -> TextSimilarityRankRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "retriever": { + "test": { + "value": "my-test-retriever" + } + }, + "field": "my-field" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + RandomRankRetrieverBuilder parsed = RandomRankRetrieverBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml new file mode 100644 index 0000000000000..d33f57f763db8 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml @@ -0,0 +1,94 @@ +setup: + - requires: + cluster_features: "gte_v8.16.0" + reason: random rerank retriever introduced in 8.16.0 + test_runner_features: "close_to" + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + text: + type: text + topic: + type: keyword + subtopic: + type: keyword + + - do: + bulk: + refresh: true + index: test-index + body: | + {"index": { "_id": "doc_1" } } + { "text": "Pugs are proof that even nature has a sense of humor." } + {"index": { "_id": "doc_2" } } + { "text": "A pugs snore can rival a chainsaw, but it's somehow adorable." } + {"index": { "_id": "doc_3" } } + { "text": "Pugs are like potato chips; you can't have just one wrinkle." } + {"index": { "_id": "doc_4" } } + { "text": "Pugs don't walk; pugs waddle majestically." } + {"index": { "_id": "doc_5" } } + { "text": "A pugs life goal: be the ultimate couch potato, and they're crushing it." 
} +--- +"Random rerank retriever predictably shuffles results": + + - do: + search: + index: test-index + body: + query: + query_string: + query: "pugs" + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_4" } + - close_to: { hits.hits.0._score: { value: 0.136, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + seed: 42 + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 0.727, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_3" } + - close_to: { hits.hits.0._score: { value: 0.731, error: 0.001 } } From ba8590ba13b898909eb2418671478d9e9643e09d Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 19 Aug 2024 18:57:39 +0400 Subject: [PATCH 075/389] Add analysis-common YAML tests to rest-resources-zip (#111974) --- modules/analysis-common/build.gradle | 3 +++ .../test/indices.analyze/{10_analyze.yml => 15_analyze.yml} | 0 x-pack/rest-resources-zip/build.gradle | 1 + 3 files changed, 4 insertions(+) rename modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/{10_analyze.yml => 15_analyze.yml} (100%) diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 77fd095806d10..1fc42a1b294fe 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -36,3 +36,6 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.query/50_queries_with_synonyms/Test common terms query with stacked tokens", "#42654 - `common` query throws an exception") } +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml similarity index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index 3d0533b4ec57e..cc5bddf12d801 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -20,6 +20,7 @@ dependencies { apis project(path: ':rest-api-spec', configuration: 'restSpecs') freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') + freeTests project(path: ':modules:analysis-common', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') From aa959e69cc507a16f7f725240db2e7453c0a8320 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 19 Aug 2024 
17:18:15 +0200 Subject: [PATCH 076/389] ES|QL: shorten error messages for UnsupportedAttributes (#111973) When dealing with index patterns, e.g. `FROM logs-*`, some fields can have the same name but different types in different indices. In this case we build an error message like ``` Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types: [ip] in [test1, test2], [keyword] in [test3] ``` With this PR, when many indices are involved, we avoid listing them all: we list only three of them and indicate how many other indices are affected, e.g. ``` Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types: [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6] ``` (see the `and [2] other indices`). Since these error messages are stored in `UnsupportedAttributes` and serialized, this PR significantly reduces the size of a serialized execution plan with many type conflicts. Fixes https://github.com/elastic/elasticsearch/issues/111964 Related to https://github.com/elastic/elasticsearch/issues/111358 --- .../esql/core/type/InvalidMappedField.java | 8 +- .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../xpack/esql/analysis/VerifierTests.java | 31 +++-- .../test/esql/51_many_indexes.yml | 126 ++++++++++++++++++ 4 files changed, 156 insertions(+), 16 deletions(-) create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 9b3d7950c2a01..8b15893f8a056 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -130,7 +130,13 @@ private static String makeErrorMessage(Map<String, Set<String>> typesToIndices) errorMessage.append("["); errorMessage.append(e.getKey()); errorMessage.append("] in "); - errorMessage.append(e.getValue()); + if (e.getValue().size() <= 3) { + errorMessage.append(e.getValue()); + } else { + errorMessage.append(e.getValue().stream().sorted().limit(3).collect(Collectors.toList())); + errorMessage.append(" and [" + (e.getValue().size() - 3) + "] other "); + errorMessage.append(e.getValue().size() == 4 ? "index" : "indices"); + } } return errorMessage.toString(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3abbb655dadd3..996c5ac2ea319 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -229,7 +229,12 @@ public enum Cap { /** * Consider the upper bound when computing the interval in BUCKET auto mode. */ - BUCKET_INCLUSIVE_UPPER_BOUND; + BUCKET_INCLUSIVE_UPPER_BOUND, + + /** + * Changed error messages for fields with conflicting types in different indices.
+ */ + SHORT_ERROR_MESSAGES_FOR_UNSUPPORTED_FIELDS; private final boolean snapshotOnly; private final FeatureFlag featureFlag; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 904308ef64d58..9b0c32b8ade2e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -64,9 +64,12 @@ public void testUnsupportedAndMultiTypedFields() { LinkedHashSet ipIndices = new LinkedHashSet<>(); ipIndices.add("test1"); ipIndices.add("test2"); + ipIndices.add("test3"); + ipIndices.add("test4"); + ipIndices.add("test5"); LinkedHashMap> typesToIndices = new LinkedHashMap<>(); typesToIndices.put("ip", ipIndices); - typesToIndices.put("keyword", Set.of("test3")); + typesToIndices.put("keyword", Set.of("test6")); EsField multiTypedField = new InvalidMappedField(multiTyped, typesToIndices); // Also add an unsupported/multityped field under the names `int` and `double` so we can use `LOOKUP int_number_names ...` and @@ -85,7 +88,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:22: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | dissect multi_typed \"%{foo}\"", analyzer) ); @@ -95,7 +98,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | grok multi_typed \"%{WORD:foo}\"", analyzer) ); @@ -115,7 +118,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:23: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = multi_typed", analyzer) ); @@ -125,7 +128,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = to_lower(multi_typed)", analyzer) ); @@ -135,7 +138,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats count(1) by multi_typed", analyzer) ); if (EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -145,7 +148,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:38: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats count(1) by multi_typed", 
analyzer) ); } @@ -156,7 +159,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); if (EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -166,7 +169,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:33: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats values(multi_typed)", analyzer) ); } @@ -177,7 +180,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); @@ -200,7 +203,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:24: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | mv_expand multi_typed", analyzer) ); @@ -210,7 +213,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:21: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | rename multi_typed as x", analyzer) ); @@ -220,7 +223,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | sort multi_typed desc", analyzer) ); @@ -230,7 +233,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:20: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml new file mode 100644 index 0000000000000..eb589cb810cc3 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml @@ -0,0 +1,126 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [short_error_messages_for_unsupported_fields] + reason: "We changed error messages for unsupported fields in v 8.16" + test_runner_features: [capabilities, allowed_warnings_regex] + + - do: + indices.create: + index: ambiguous_1 + body: + mappings: + properties: + "name": + type: keyword + + - do: + 
indices.create: + index: ambiguous_2 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_3 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_4 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_5 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_6 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_7 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_8 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_9 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_10 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_11 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_12 + body: + mappings: + properties: + "name": + type: ip + +--- +load many indices with ambiguities: + - do: + catch: '/Cannot use field \[name\] due to ambiguities being mapped as \[3\] incompatible types: \[integer\] in \[ambiguous_4, ambiguous_5, ambiguous_6\] and \[1\] other index, \[ip\] in \[ambiguous_10, ambiguous_11, ambiguous_12\] and \[2\] other indices, \[keyword\] in \[ambiguous_1, ambiguous_2, ambiguous_3\]/' + esql.query: + body: + query: 'FROM ambiguous* | SORT name' + From e6b830e3b3e3665dff061c4e6c92285efdb1df55 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 17:49:48 +0100 Subject: [PATCH 077/389] Clean up dangling S3 multipart uploads (#111955) If Elasticsearch fails part-way through a multipart upload to S3 it will generally try and abort the upload, but it's possible that the abort attempt also fails. In this case the upload becomes _dangling_. Dangling uploads consume storage space, and therefore cost money, until they are eventually aborted. Earlier versions of Elasticsearch require users to check for dangling multipart uploads, and to manually abort any that they find. This commit introduces a cleanup process which aborts all dangling uploads on each snapshot delete instead. 
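For illustration only (the repository name and bucket below are hypothetical; `max_multipart_upload_cleanup_size` is the setting introduced by this change), the cleanup batch size can be tuned per repository, or the cleanup disabled entirely by setting it to `0`:

```
PUT _snapshot/my_s3_repository
{
  "type": "s3",
  "settings": {
    "bucket": "my-bucket",
    "max_multipart_upload_cleanup_size": 500
  }
}
```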
Closes #44971 Closes #101169 --- docs/changelog/111955.yaml | 7 ++ .../snapshot-restore/repository-s3.asciidoc | 36 ++---- .../s3/S3BlobStoreRepositoryTests.java | 113 ++++++++++++++++++ .../repositories/s3/S3BlobContainer.java | 95 +++++++++++++++ .../repositories/s3/S3Repository.java | 95 +++++++++++++++ .../main/java/fixture/s3/S3HttpHandler.java | 11 +- 6 files changed, 329 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/111955.yaml diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml new file mode 100644 index 0000000000000..ebc518203b7cc --- /dev/null +++ b/docs/changelog/111955.yaml @@ -0,0 +1,7 @@ +pr: 111955 +summary: Clean up dangling S3 multipart uploads +area: Snapshot/Restore +type: enhancement +issues: + - 101169 + - 44971 diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index d757a74110ca9..3a9c12caebad9 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -317,6 +317,15 @@ include::repository-shared-settings.asciidoc[] https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html[AWS DeleteObjects API]. +`max_multipart_upload_cleanup_size`:: + + (<>) Sets the maximum number of possibly-dangling multipart + uploads to clean up in each batch of snapshot deletions. Defaults to `1000` + which is the maximum number supported by the + https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[AWS + ListMultipartUploads API]. If set to `0`, {es} will not attempt to clean up + dangling multipart uploads. + NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated, and will be removed in a future version. @@ -492,33 +501,6 @@ by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. -==== Cleaning up multi-part uploads - -{es} uses S3's multi-part upload process to upload larger blobs to the -repository. The multi-part upload process works by dividing each blob into -smaller parts, uploading each part independently, and then completing the -upload in a separate step. This reduces the amount of data that {es} must -re-send if an upload fails: {es} only needs to re-send the part that failed -rather than starting from the beginning of the whole blob. The storage for each -part is charged independently starting from the time at which the part was -uploaded. - -If a multi-part upload cannot be completed then it must be aborted in order to -delete any parts that were successfully uploaded, preventing further storage -charges from accumulating. {es} will automatically abort a multi-part upload on -failure, but sometimes the abort request itself fails. For example, if the -repository becomes inaccessible or the instance on which {es} is running is -terminated abruptly then {es} cannot complete or abort any ongoing uploads. - -You must make sure that failed uploads are eventually aborted to avoid -unnecessary storage costs. 
You can use the -https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[List -multipart uploads API] to list the ongoing uploads and look for any which are -unusually long-running, or you can -https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpu-abort-incomplete-mpu-lifecycle-config.html[configure -a bucket lifecycle policy] to automatically abort incomplete uploads once they -reach a certain age. - [[repository-s3-aws-vpc]] ==== AWS VPC bandwidth settings diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1132111826563..1ab370ad203fc 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -10,13 +10,20 @@ import fixture.s3.S3HttpHandler; import com.amazonaws.http.AmazonHttpClient; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.LogEvent; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -54,6 +61,7 @@ import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -70,6 +78,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -81,6 +90,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; @@ -451,6 +461,106 @@ private Map getServerMetrics() { return Collections.emptyMap(); } + public void testMultipartUploadCleanup() { + final String repoName = randomRepositoryName(); + createRepository(repoName, repositorySettings(repoName), true); + + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = 
randomIdentifier(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + final var repository = asInstanceOf( + S3Repository.class, + internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName) + ); + final var blobStore = asInstanceOf(S3BlobStore.class, asInstanceOf(BlobStoreWrapper.class, repository.blobStore()).delegate()); + + try (var clientRef = blobStore.clientReference()) { + final var danglingBlobName = randomIdentifier(); + final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest( + blobStore.bucket(), + blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName + ); + initiateMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest); + + final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix( + repository.basePath().buildAsString() + ); + listMultipartUploadsRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + assertEquals( + List.of(multipartUploadResult.getUploadId()), + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList() + ); + + final var seenCleanupLogLatch = new CountDownLatch(1); + MockLog.assertThatLogger(() -> { + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)); + safeAwait(seenCleanupLogLatch); + }, + S3BlobContainer.class, + new MockLog.SeenEventExpectation( + "found-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + "found [1] possibly-dangling multipart uploads; will clean them up after finalizing the current snapshot deletions" + ), + new MockLog.SeenEventExpectation( + "cleaned-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + Strings.format( + "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]", + multipartUploadResult.getUploadId(), + repoName, + danglingBlobName + ) + ) { + @Override + public void match(LogEvent event) { + super.match(event); + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { + seenCleanupLogLatch.countDown(); + } + } + } + ); + + assertThat( + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList(), + empty() + ); + } + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. 
*/ @@ -592,6 +702,9 @@ public void maybeTrack(final String rawRequest, Headers requestHeaders) { trackRequest("ListObjects"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong()) .incrementAndGet(); + } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) { + // TODO track ListMultipartUploads requests + logger.info("--> ListMultipartUploads not tracked [{}] with parsed purpose [{}]", request, purpose.getKey()); } else if (Regex.simpleMatch("GET /*/*", request)) { trackRequest("GetObject"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong()) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 3e2249bf82bb6..cf3e73df2aee2 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -28,13 +28,17 @@ import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.util.ValidationUtils; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.ThreadedActionListener; +import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -54,6 +58,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import org.elasticsearch.threadpool.ThreadPool; @@ -912,4 +917,94 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) { + try (var clientReference = blobStore.clientReference()) { + final var bucket = blobStore.bucket(); + final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads); + request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()); + final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request)); + final var multipartUploads = multipartUploadListing.getMultipartUploads(); + if (multipartUploads.isEmpty()) { + logger.debug("found no multipart uploads to clean up"); + return ActionListener.noop(); + } else { + // the uploads are only _possibly_ dangling because it's also possible we're no longer the master and the new master has + // started some more shard snapshots + if (multipartUploadListing.isTruncated()) { + logger.info(""" + found at least [{}] possibly-dangling multipart uploads; will clean up the first [{}] after finalizing \ + the current snapshot deletions, and 
will check for further possibly-dangling multipart uploads in future \ + snapshot deletions""", multipartUploads.size(), multipartUploads.size()); + } else { + logger.info(""" + found [{}] possibly-dangling multipart uploads; \ + will clean them up after finalizing the current snapshot deletions""", multipartUploads.size()); + } + return newMultipartUploadCleanupListener( + refs, + multipartUploads.stream().map(u -> new AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList() + ); + } + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. + logger.warn("failure while checking for possibly-dangling multipart uploads", e); + return ActionListener.noop(); + } + } + + private ActionListener<Void> newMultipartUploadCleanupListener( + RefCountingRunnable refs, + List<AbortMultipartUploadRequest> abortMultipartUploadRequests + ) { + return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + try (var clientReference = blobStore.clientReference()) { + for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) { + abortMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + try { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); + logger.info( + "cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ); + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. Note that any failure + // is surprising: even a 404 means that something else aborted/completed the upload at a point where there + // should be no other processes interacting with the repository. + logger.warn( + Strings.format( + "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ), + e + ); + } + } + } + } + + @Override + public void onFailure(Exception e) { + logger.log( + MasterService.isPublishFailureException(e) + || (e instanceof RepositoryException repositoryException + && repositoryException.getCause() instanceof Exception cause + && MasterService.isPublishFailureException(cause)) ? 
Level.DEBUG : Level.WARN, + "failed to start cleanup of dangling multipart uploads", + e + ); + } + }, refs.acquire())); + } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 72b48c5903629..a6edb0dec4122 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; @@ -28,6 +29,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -36,14 +38,17 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.util.Collection; import java.util.Map; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -183,6 +188,16 @@ class S3Repository extends MeteredBlobStoreRepository { S3BlobStore.MAX_BULK_DELETES ); + /** + * Maximum number of uploads to request for cleanup when doing a snapshot delete. + */ + static final Setting MAX_MULTIPART_UPLOAD_CLEANUP_SIZE = Setting.intSetting( + "max_multipart_upload_cleanup_size", + 1000, + 0, + Setting.Property.Dynamic + ); + private final S3Service service; private final String bucket; @@ -459,4 +474,84 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.S3_COMPATIBLE_REPOSITORIES ); } + + // only one multipart cleanup process running at once + private final AtomicBoolean multipartCleanupInProgress = new AtomicBoolean(); + + @Override + public void deleteSnapshots( + Collection snapshotIds, + long repositoryDataGeneration, + IndexVersion minimumNodeVersion, + SnapshotDeleteListener snapshotDeleteListener + ) { + getMultipartUploadCleanupListener( + isReadOnly() ? 
0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), + new ActionListener<>() { + @Override + public void onResponse(ActionListener<Void> multipartUploadCleanupListener) { + S3Repository.super.deleteSnapshots( + snapshotIds, + repositoryDataGeneration, + minimumNodeVersion, + new SnapshotDeleteListener() { + @Override + public void onDone() { + snapshotDeleteListener.onDone(); + } + + @Override + public void onRepositoryDataWritten(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + snapshotDeleteListener.onRepositoryDataWritten(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + snapshotDeleteListener.onFailure(e); + } + } + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); + assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything + snapshotDeleteListener.onFailure(e); + } + } + ); + } + + /** + * Capture the current list of multipart uploads, and (asynchronously) return a listener which, if completed successfully, aborts those + * uploads. Called at the start of a snapshot delete operation, at which point there should be no ongoing uploads (except in the case of + * a master failover). We protect against the master failover case by waiting until the delete operation successfully updates the root + * index-N blob before aborting any uploads. + */ + void getMultipartUploadCleanupListener(int maxUploads, ActionListener<ActionListener<Void>> listener) { + if (maxUploads == 0) { + listener.onResponse(ActionListener.noop()); + return; + } + + if (multipartCleanupInProgress.compareAndSet(false, true) == false) { + logger.info("multipart upload cleanup already in progress"); + listener.onResponse(ActionListener.noop()); + return; + } + + try (var refs = new RefCountingRunnable(() -> multipartCleanupInProgress.set(false))) { + snapshotExecutor.execute( + ActionRunnable.supply( + ActionListener.releaseAfter(listener, refs.acquire()), + () -> blobContainer() instanceof S3BlobContainer s3BlobContainer + ? s3BlobContainer.getMultipartUploadCleanupListener(maxUploads, refs) + : ActionListener.noop() + ) + ); + } + } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index 7f363fe0b87c3..447e225005b58 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -61,6 +61,7 @@ public class S3HttpHandler implements HttpHandler { private final String bucket; private final String path; + private final String basePrefix; private final ConcurrentMap<String, BytesReference> blobs = new ConcurrentHashMap<>(); private final ConcurrentMap<String, MultipartUpload> uploads = new ConcurrentHashMap<>(); @@ -71,6 +72,7 @@ public S3HttpHandler(final String bucket) { public S3HttpHandler(final String bucket, @Nullable final String basePath) { this.bucket = Objects.requireNonNull(bucket); + this.basePrefix = Objects.requireNonNullElse(basePath, ""); this.path = bucket + (basePath != null && basePath.isEmpty() == false ?
"/" + basePath : ""); } @@ -96,7 +98,9 @@ public void handle(final HttpExchange exchange) throws IOException { } else { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); } - } else if (Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request)) { + } else if (isListMultipartUploadsRequest(request)) { + assert request.contains("prefix=" + basePrefix) : basePrefix + " vs " + request; + final Map params = new HashMap<>(); RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params); final var prefix = params.get("prefix"); @@ -329,6 +333,11 @@ public void handle(final HttpExchange exchange) throws IOException { } } + private boolean isListMultipartUploadsRequest(String request) { + return Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request) + || Regex.simpleMatch("GET /" + bucket + "/?uploads&max-uploads=*&prefix=*", request); + } + public Map blobs() { return blobs; } From 19dc8841d4053fd449a47c4e9bd26d35767d649e Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Mon, 19 Aug 2024 09:52:41 -0700 Subject: [PATCH 078/389] Fix synthetic source for empty nested objects (#111943) --- docs/changelog/111943.yaml | 6 ++ ...ogsIndexModeRandomDataChallengeRestIT.java | 7 +- .../index/mapper/NestedObjectMapper.java | 5 +- .../index/mapper/ObjectMapper.java | 14 +-- .../index/mapper/NestedObjectMapperTests.java | 91 +++++++++++++++++++ 5 files changed, 106 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/111943.yaml diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml new file mode 100644 index 0000000000000..6b9f03ccee31c --- /dev/null +++ b/docs/changelog/111943.yaml @@ -0,0 +1,6 @@ +pr: 111943 +summary: Fix synthetic source for empty nested objects +area: Mapping +type: bug +issues: + - 111811 diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 0b41d62f6fe2c..8f23f86267261 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -42,10 +42,9 @@ public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { this.subobjectsDisabled = randomBoolean(); var specificationBuilder = DataGeneratorSpecification.builder(); - // TODO enable nested fields when subobjects are enabled - // It currently hits a bug with empty nested objects - // Nested fields don't work with subobjects: false. 
- specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + if (subobjectsDisabled) { + specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + } this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() { @Override public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 76212f9899f5c..23bdd0f559206 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -463,17 +463,14 @@ public boolean hasValue() { public void write(XContentBuilder b) throws IOException { assert (children != null && children.size() > 0); if (children.size() == 1) { - b.startObject(leafName()); + b.field(leafName()); leafStoredFieldLoader.advanceTo(children.get(0)); leafSourceLoader.write(leafStoredFieldLoader, children.get(0), b); - b.endObject(); } else { b.startArray(leafName()); for (int childId : children) { - b.startObject(); leafStoredFieldLoader.advanceTo(childId); leafSourceLoader.write(leafStoredFieldLoader, childId, b); - b.endObject(); } b.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index a3d5999a3dcd2..843fc3b15a6df 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -843,12 +843,10 @@ public void write(XContentBuilder b) throws IOException { return; } - if (isFragment == false) { - if (isRoot()) { - b.startObject(); - } else { - b.startObject(leafName()); - } + if (isRoot() || isFragment) { + b.startObject(); + } else { + b.startObject(leafName()); } if (ignoredValues != null && ignoredValues.isEmpty() == false) { @@ -875,9 +873,7 @@ public void write(XContentBuilder b) throws IOException { } } hasValue = false; - if (isFragment == false) { - b.endObject(); - } + b.endObject(); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 306887099849b..4fba22101df03 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1737,6 +1737,97 @@ public void testSyntheticNestedWithIncludeInRoot() throws IOException { {"path":{"bar":"B","foo":"A"}}""", syntheticSource); } + public void testSyntheticNestedWithEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { b.startObject("path").nullField("foo").endObject(); }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithEmptySubObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", 
"nested"); + { + b.startObject("properties"); + { + b.startObject("to").startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject().endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to").nullField("foo").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().field("foo", "A").endObject(); + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":[{"foo":"A"},{}]}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingOnlyEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + private NestedObjectMapper createNestedObjectMapperWithAllParametersSet(CheckedConsumer propertiesBuilder) throws IOException { DocumentMapper mapper = createDocumentMapper(mapping(b -> { From 6f33812fade9c69745fe2a3d66b063028c79f1d4 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 19 Aug 2024 12:02:04 -0700 Subject: [PATCH 079/389] Avoid losing error message in failure collector (#111983) The node-disconnected exception might not include the root cause. In this case, the failure collector incorrectly unwraps the exception and wraps it in a new Elasticsearch exception, losing the message. We should instead use the original exception to preserve the reason. 
Closes #111894 --- docs/changelog/111983.yaml | 6 ++++++ .../compute/operator/FailureCollector.java | 17 +++++++++++++---- .../operator/FailureCollectorTests.java | 19 +++++++++++++++++++ 3 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/111983.yaml diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml new file mode 100644 index 0000000000000..d5043d0b44155 --- /dev/null +++ b/docs/changelog/111983.yaml @@ -0,0 +1,6 @@ +pr: 111983 +summary: Avoid losing error message in failure collector +area: ES|QL +type: bug +issues: + - 111894 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java index 99edab038af31..943ba4dc1f4fa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java @@ -46,10 +46,19 @@ public FailureCollector(int maxExceptions) { this.maxExceptions = maxExceptions; } - public void unwrapAndCollect(Exception originEx) { - final Exception e = originEx instanceof TransportException - ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) - : originEx; + private static Exception unwrapTransportException(TransportException te) { + final Throwable cause = te.getCause(); + if (cause == null) { + return te; + } else if (cause instanceof Exception ex) { + return ex; + } else { + return new ElasticsearchException(cause); + } + } + + public void unwrapAndCollect(Exception e) { + e = e instanceof TransportException te ? unwrapTransportException(te) : e; if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { if (cancelledExceptionsCount.incrementAndGet() <= maxExceptions) { cancelledExceptions.add(e); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java index d5fa0a1eaecc9..637cbe8892b3e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java @@ -7,12 +7,15 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.NodeDisconnectedException; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportException; import org.hamcrest.Matchers; import java.io.IOException; @@ -25,6 +28,9 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; public class FailureCollectorTests extends ESTestCase { @@ -87,4 +93,17 @@ public void testEmpty() { assertFalse(collector.hasFailure()); assertNull(collector.getFailure()); } + + public void 
testTransportExceptions() { + FailureCollector collector = new FailureCollector(5); + collector.unwrapAndCollect(new NodeDisconnectedException(DiscoveryNodeUtils.builder("node-1").build(), "/field_caps")); + collector.unwrapAndCollect(new TransportException(new CircuitBreakingException("too large", CircuitBreaker.Durability.TRANSIENT))); + Exception failure = collector.getFailure(); + assertNotNull(failure); + assertThat(failure, instanceOf(NodeDisconnectedException.class)); + assertThat(failure.getMessage(), equalTo("[][0.0.0.0:1][/field_caps] disconnected")); + Throwable[] suppressed = failure.getSuppressed(); + assertThat(suppressed, arrayWithSize(1)); + assertThat(suppressed[0], instanceOf(CircuitBreakingException.class)); + } } From dc24003540e02152fe64893d7c38af3f3dc31996 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 19 Aug 2024 17:29:01 -0400 Subject: [PATCH 080/389] ESQL: Profile more timing information (#111855) This profiles additional timing information for each individual driver. To the results from `profile` it adds the start and stop time for each driver. That was already in the task status. To the profile and task status it also adds the number of times the driver slept and some more detailed history about a few of those times. Explanation time! The compute engine splits work into some number of `Drivers` per node. Each `Driver` is a single-threaded entity - it runs on a thread for a while then does one of three things: 1. Finishes 2. Goes async because one of its `Operator`s has gone async 3. Yields the thread pool because it has run for too long This PR measures the last two. At this point only three operators can go async: * ENRICH * Reading from an empty exchange * Writing to a full exchange We're quite interested in these sleeps at the moment because we think they may be slowing things down. Here's what it looks like when a driver goes async because it wants to read from an empty exchange: ``` ... the rest of the profile ... "sleeps" : { "counts" : { "exchange empty" : 2 }, "first" : [ { "reason" : "exchange empty", "sleep" : "2024-08-13T19:45:57.943Z", "sleep_millis" : 1723578357943, "wake" : "2024-08-13T19:45:58.159Z", "wake_millis" : 1723578358159 }, { "reason" : "exchange empty", "sleep" : "2024-08-13T19:45:58.164Z", "sleep_millis" : 1723578358164, "wake" : "2024-08-13T19:45:58.165Z", "wake_millis" : 1723578358165 } ], "last": [same as above] ``` Every time the driver goes async we count it in the `counts` map - grouped by the reason the driver slept. We also record the sleep and wake times for the first and last ten times the driver sleeps. In this case it only slept twice, so the `first` and `last` arrays are the same. This should give us a good sense of why drivers sleep while using a limited amount of memory per driver.
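For reference, here is a tiny sketch of the bookkeeping behind that output, using the new `DriverSleeps` API added below (the timestamps are arbitrary, the class name `DriverSleepsSketch` is invented, and this snippet is illustrative only):

```java
import org.elasticsearch.compute.operator.DriverSleeps;

class DriverSleepsSketch {
    public static void main(String[] args) {
        DriverSleeps sleeps = DriverSleeps.empty();
        // The driver blocks reading from an empty exchange and records why and when:
        sleeps = sleeps.sleep("exchange empty", 1723578357943L);
        // A page arrives and the driver wakes up:
        sleeps = sleeps.wake(1723578358159L);
        // counts() now maps "exchange empty" -> 1, and first() and last() each
        // hold the same single Sleep record, just like the JSON above.
    }
}
```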
--- docs/changelog/111855.yaml | 5 + .../description/to_datetime.asciidoc | 2 + .../kibana/definition/to_datetime.json | 1 + .../esql/functions/kibana/docs/to_datetime.md | 1 + .../org/elasticsearch/TransportVersions.java | 1 + .../compute/operator/AsyncOperator.java | 4 +- .../compute/operator/Driver.java | 108 ++++++-- .../compute/operator/DriverProfile.java | 81 +++++- .../compute/operator/DriverSleeps.java | 180 +++++++++++++ .../compute/operator/DriverStatus.java | 38 ++- .../compute/operator/IsBlockedResult.java | 31 +++ .../compute/operator/Operator.java | 4 +- .../operator/exchange/ExchangeBuffer.java | 9 +- .../operator/exchange/ExchangeSink.java | 4 +- .../exchange/ExchangeSinkHandler.java | 3 +- .../exchange/ExchangeSinkOperator.java | 6 +- .../operator/exchange/ExchangeSource.java | 4 +- .../exchange/ExchangeSourceHandler.java | 11 +- .../exchange/ExchangeSourceOperator.java | 10 +- .../compute/operator/AsyncOperatorTests.java | 49 ++-- .../compute/operator/DriverProfileTests.java | 60 ++++- .../compute/operator/DriverSleepsTests.java | 240 ++++++++++++++++++ .../compute/operator/DriverStatusTests.java | 42 ++- .../exchange/ExchangeServiceTests.java | 28 +- .../xpack/esql/qa/single_node/RestEsqlIT.java | 126 ++++++++- .../action/EsqlQueryResponseProfileTests.java | 6 +- .../esql/action/EsqlQueryResponseTests.java | 15 +- .../esql/plugin/ComputeListenerTests.java | 13 +- 28 files changed, 954 insertions(+), 128 deletions(-) create mode 100644 docs/changelog/111855.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml new file mode 100644 index 0000000000000..3f15e9c20135a --- /dev/null +++ b/docs/changelog/111855.yaml @@ -0,0 +1,5 @@ +pr: 111855 +summary: "ESQL: Profile more timing information" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc index b37bd6b22ac2f..ee6866da9ee34 100644 --- a/docs/reference/esql/functions/description/to_datetime.asciidoc +++ b/docs/reference/esql/functions/description/to_datetime.asciidoc @@ -3,3 +3,5 @@ *Description* Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. + +NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.
diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 10fcf8b22e8b0..778d151c40151 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -3,6 +3,7 @@ "type" : "eval", "name" : "to_datetime", "description" : "Converts an input value to a date value.\nA string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.\nTo convert dates in other formats, use <>.", + "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/to_datetime.md b/docs/reference/esql/functions/kibana/docs/to_datetime.md index 5e8f9c72adc2c..613381615421a 100644 --- a/docs/reference/esql/functions/kibana/docs/to_datetime.md +++ b/docs/reference/esql/functions/kibana/docs/to_datetime.md @@ -11,3 +11,4 @@ To convert dates in other formats, use <>. ROW string = ["1953-09-02T00:00:00.000Z", "1964-06-02T00:00:00.000Z", "1964-06-02 00:00:00"] | EVAL datetime = TO_DATETIME(string) ``` +Note: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1009d9e2ae7d1..3bece535aab0f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -192,6 +192,7 @@ static TransportVersion def(int id) { public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); + public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 79359737b1b35..92213eca7b477 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -199,7 +199,7 @@ public Page getOutput() { } @Override - public SubscribableListener<Void> isBlocked() { + public IsBlockedResult isBlocked() { // TODO: Add an exchange service between async operation instead?
if (finished) { return Operator.NOT_BLOCKED; @@ -216,7 +216,7 @@ public SubscribableListener isBlocked() { if (blockedFuture == null) { blockedFuture = new SubscribableListener<>(); } - return blockedFuture; + return new IsBlockedResult(blockedFuture, getClass().getSimpleName()); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index 785db826aadd6..acbf8a17b31fd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -127,7 +127,17 @@ public Driver( this.statusNanos = statusInterval.nanos(); this.releasable = releasable; this.status = new AtomicReference<>( - new DriverStatus(sessionId, startTime, System.currentTimeMillis(), 0, 0, DriverStatus.Status.QUEUED, List.of(), List.of()) + new DriverStatus( + sessionId, + startTime, + System.currentTimeMillis(), + 0, + 0, + DriverStatus.Status.QUEUED, + List.of(), + List.of(), + DriverSleeps.empty() + ) ); } @@ -170,35 +180,36 @@ public DriverContext driverContext() { * thread to do other work instead of blocking or busy-spinning on the blocked operator. */ SubscribableListener run(TimeValue maxTime, int maxIterations, LongSupplier nowSupplier) { + updateStatus(0, 0, DriverStatus.Status.RUNNING, "driver running"); long maxTimeNanos = maxTime.nanos(); long startTime = nowSupplier.getAsLong(); long nextStatus = startTime + statusNanos; int iter = 0; while (true) { - SubscribableListener fut = runSingleLoopIteration(); + IsBlockedResult isBlocked = runSingleLoopIteration(); iter++; - if (fut.isDone() == false) { - updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC); - return fut; + if (isBlocked.listener().isDone() == false) { + updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC, isBlocked.reason()); + return isBlocked.listener(); } if (isFinished()) { finishNanos = nowSupplier.getAsLong(); - updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE); + updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE, "driver done"); driverContext.finish(); Releasables.close(releasable, driverContext.getSnapshot()); - return Operator.NOT_BLOCKED; + return Operator.NOT_BLOCKED.listener(); } long now = nowSupplier.getAsLong(); if (iter >= maxIterations) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver iterations"); + return Operator.NOT_BLOCKED.listener(); } if (now - startTime >= maxTimeNanos) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver time"); + return Operator.NOT_BLOCKED.listener(); } if (now > nextStatus) { - updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING); + updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING, "driver running"); nextStatus = now + statusNanos; } } @@ -230,7 +241,7 @@ public void abort(Exception reason, ActionListener listener) { } } - private SubscribableListener runSingleLoopIteration() { + private IsBlockedResult runSingleLoopIteration() { ensureNotCancelled(); boolean movedPage = false; @@ -239,7 +250,7 @@ private SubscribableListener runSingleLoopIteration() { Operator nextOp = 
activeOperators.get(i + 1); // skip blocked operator - if (op.isBlocked().isDone() == false) { + if (op.isBlocked().listener().isDone() == false) { continue; } @@ -290,7 +301,10 @@ private SubscribableListener runSingleLoopIteration() { if (movedPage == false) { return oneOf( - activeOperators.stream().map(Operator::isBlocked).filter(laf -> laf.isDone() == false).collect(Collectors.toList()) + activeOperators.stream() + .map(Operator::isBlocked) + .filter(laf -> laf.listener().isDone() == false) + .collect(Collectors.toList()) ); } return Operator.NOT_BLOCKED; @@ -327,7 +341,7 @@ public static void start( ) { driver.completionListener.addListener(listener); if (driver.started.compareAndSet(false, true)) { - driver.updateStatus(0, 0, DriverStatus.Status.STARTING); + driver.updateStatus(0, 0, DriverStatus.Status.STARTING, "driver starting"); schedule(DEFAULT_TIME_BEFORE_YIELDING, maxIterations, threadContext, executor, driver, driver.completionListener); } } @@ -394,18 +408,23 @@ void onComplete(ActionListener listener) { }); } - private static SubscribableListener oneOf(List> futures) { - if (futures.isEmpty()) { + private static IsBlockedResult oneOf(List results) { + if (results.isEmpty()) { return Operator.NOT_BLOCKED; } - if (futures.size() == 1) { - return futures.get(0); + if (results.size() == 1) { + return results.get(0); } SubscribableListener oneOf = new SubscribableListener<>(); - for (SubscribableListener fut : futures) { - fut.addListener(oneOf); + StringBuilder reason = new StringBuilder(); + for (IsBlockedResult r : results) { + r.listener().addListener(oneOf); + if (reason.isEmpty() == false) { + reason.append(" OR "); + } + reason.append(r.reason()); } - return oneOf; + return new IsBlockedResult(oneOf, reason.toString()); } @Override @@ -440,7 +459,15 @@ public DriverProfile profile() { if (status.status() != DriverStatus.Status.DONE) { throw new IllegalStateException("can only get profile from finished driver"); } - return new DriverProfile(finishNanos - startNanos, status.cpuNanos(), status.iterations(), status.completedOperators()); + return new DriverProfile( + status.started(), + status.lastUpdated(), + finishNanos - startNanos, + status.cpuNanos(), + status.iterations(), + status.completedOperators(), + status.sleeps() + ); } /** @@ -449,17 +476,44 @@ public DriverProfile profile() { * @param extraIterations how many iterations to add to the previous status * @param status the status of the overall driver request */ - private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status) { + private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status, String reason) { this.status.getAndUpdate(prev -> { + long now = System.currentTimeMillis(); + DriverSleeps sleeps = prev.sleeps(); + + // Rebuild the sleeps or bail entirely based on the updated status. + // Sorry for the complexity here. If anyone has a nice way to refactor this, be my guest. + switch (status) { + case ASYNC, WAITING -> sleeps = sleeps.sleep(reason, now); + case RUNNING -> { + switch (prev.status()) { + case ASYNC, WAITING -> sleeps = sleeps.wake(now); + case STARTING -> { + if (extraIterations == 0) { + /* + * 0 extraIterations means we haven't started the loop - we're just + * signaling that we've woken up. We don't need to signal that when + * the state is already STARTING because we don't have anything + * interesting to report. And some tests rely on the status staying + * in the STARTING state until the first status report. 
+ */ + return prev; + } + } + } + } + } + return new DriverStatus( sessionId, startTime, - System.currentTimeMillis(), + now, prev.cpuNanos() + extraCpuNanos, prev.iterations() + extraIterations, status, statusOfCompletedOperators, - activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList() + activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList(), + sleeps ); }); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java index 414fbbbca8294..e7b16072f4b66 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java @@ -27,6 +27,16 @@ * Profile results from a single {@link Driver}. */ public class DriverProfile implements Writeable, ChunkedToXContentObject { + /** + * Millis since epoch when the driver started. + */ + private final long startMillis; + + /** + * Millis since epoch when the driver stopped. + */ + private final long stopMillis; + /** * Nanos between creation and completion of the {@link Driver}. */ @@ -45,18 +55,38 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject { private final long iterations; /** - * Status of each {@link Operator} in the driver when it finishes. + * Status of each {@link Operator} in the driver when it finished. */ private final List operators; - public DriverProfile(long tookNanos, long cpuNanos, long iterations, List operators) { + private final DriverSleeps sleeps; + + public DriverProfile( + long startMillis, + long stopMillis, + long tookNanos, + long cpuNanos, + long iterations, + List operators, + DriverSleeps sleeps + ) { + this.startMillis = startMillis; + this.stopMillis = stopMillis; this.tookNanos = tookNanos; this.cpuNanos = cpuNanos; this.iterations = iterations; this.operators = operators; + this.sleeps = sleeps; } public DriverProfile(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + this.startMillis = in.readVLong(); + this.stopMillis = in.readVLong(); + } else { + this.startMillis = 0; + this.stopMillis = 0; + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { this.tookNanos = in.readVLong(); this.cpuNanos = in.readVLong(); @@ -67,16 +97,36 @@ public DriverProfile(StreamInput in) throws IOException { this.iterations = 0; } this.operators = in.readCollectionAsImmutableList(DriverStatus.OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + out.writeVLong(startMillis); + out.writeVLong(stopMillis); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeVLong(tookNanos); out.writeVLong(cpuNanos); out.writeVLong(iterations); } out.writeCollection(operators); + sleeps.writeTo(out); + } + + /** + * Millis since epoch when the driver started. + */ + public long startMillis() { + return startMillis; + } + + /** + * Millis since epoch when the driver stopped. 
+ */ + public long stopMillis() { + return stopMillis; } /** @@ -102,13 +152,25 @@ public long iterations() { return iterations; } + /** + * Status of each {@link Operator} in the driver when it finished. + */ public List<DriverStatus.OperatorStatus> operators() { return operators; } + /** + * Records of the times the driver has slept. + */ + public DriverSleeps sleeps() { + return sleeps; + } + @Override public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> { + b.timeField("start_millis", "start", startMillis); + b.timeField("stop_millis", "stop", stopMillis); b.field("took_nanos", tookNanos); if (b.humanReadable()) { b.field("took_time", TimeValue.timeValueNanos(tookNanos)); } @@ -119,7 +181,11 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params } b.field("iterations", iterations); return b; - }), ChunkedToXContentHelper.array("operators", operators.iterator()), ChunkedToXContentHelper.endObject()); + }), + ChunkedToXContentHelper.array("operators", operators.iterator()), + Iterators.single((b, p) -> b.field("sleeps", sleeps)), + ChunkedToXContentHelper.endObject() + ); } @Override @@ -131,15 +197,18 @@ public boolean equals(Object o) { return false; } DriverProfile that = (DriverProfile) o; - return tookNanos == that.tookNanos + return startMillis == that.startMillis + && stopMillis == that.stopMillis + && tookNanos == that.tookNanos && cpuNanos == that.cpuNanos && iterations == that.iterations - && Objects.equals(operators, that.operators); + && Objects.equals(operators, that.operators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(tookNanos, cpuNanos, iterations, operators); + return Objects.hash(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java new file mode 100644 index 0000000000000..217a0b033bed4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + * Records of the times the driver has slept. + * @param counts map from the reason the driver has slept to the number of times it slept for that reason + * @param first the first few times the driver slept + * @param last the last few times the driver slept + */ +public record DriverSleeps(Map<String, Long> counts, List<Sleep> first, List<Sleep> last) implements Writeable, ToXContentObject { + /** + * A record of a time the driver slept.
+ * @param reason The reason the driver slept + * @param sleep Millis since epoch when the driver slept + * @param wake Millis since epoch when the driver woke, or 0 if it is currently sleeping + */ + public record Sleep(String reason, long sleep, long wake) implements Writeable, ToXContentObject { + Sleep(StreamInput in) throws IOException { + this(in.readString(), in.readLong(), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(reason); + out.writeLong(sleep); + out.writeLong(wake); + } + + Sleep wake(long now) { + if (isStillSleeping() == false) { + throw new IllegalStateException("Already awake."); + } + return new Sleep(reason, sleep, now); + } + + public boolean isStillSleeping() { + return wake == 0; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("reason", reason); + builder.timeField("sleep_millis", "sleep", sleep); + if (wake > 0) { + builder.timeField("wake_millis", "wake", wake); + } + return builder.endObject(); + } + } + + /** + * How many of the first and last sleeps to keep. + */ + static final int RECORDS = 10; + + public static DriverSleeps read(StreamInput in) throws IOException { + if (in.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return empty(); + } + return new DriverSleeps( + in.readImmutableMap(StreamInput::readVLong), + in.readCollectionAsList(Sleep::new), + in.readCollectionAsList(Sleep::new) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return; + } + out.writeMap(counts, StreamOutput::writeVLong); + out.writeCollection(first); + out.writeCollection(last); + } + + public static DriverSleeps empty() { + return new DriverSleeps(Map.of(), List.of(), List.of()); + } + + /** + * Record a sleep. + * @param reason the reason for the sleep + * @param now the current time + */ + public DriverSleeps sleep(String reason, long now) { + if (last.isEmpty() == false) { + Sleep lastLast = last.get(last.size() - 1); + if (lastLast.isStillSleeping()) { + throw new IllegalStateException("Still sleeping."); + } + } + Map<String, Long> newCounts = new TreeMap<>(counts); + newCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + List<Sleep> newFirst = first.size() < RECORDS ? append(first, reason, now) : first; + List<Sleep> newLast = last.size() < RECORDS ? append(last, reason, now) : rollOnto(last, reason, now); + return new DriverSleeps(newCounts, newFirst, newLast); + } + + /** + * Record a wake. + * @param now the current time + */ + public DriverSleeps wake(long now) { + if (now == 0) { + throw new IllegalStateException("Can't wake at epoch. That's used to signal sleeping."); + } + if (last.isEmpty()) { + throw new IllegalStateException("Never slept."); + } + Sleep lastFirst = first.get(first.size() - 1); + List<Sleep> newFirst = lastFirst.wake == 0 ?
wake(first, now) : first; + return new DriverSleeps(counts, newFirst, wake(last, now)); + } + + private List<Sleep> append(List<Sleep> old, String reason, long now) { + List<Sleep> sleeps = new ArrayList<>(old.size() + 1); + sleeps.addAll(old); + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List<Sleep> rollOnto(List<Sleep> old, String reason, long now) { + List<Sleep> sleeps = new ArrayList<>(old.size()); + for (int i = 1; i < old.size(); i++) { + sleeps.add(old.get(i)); + } + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List<Sleep> wake(List<Sleep> old, long now) { + List<Sleep> sleeps = new ArrayList<>(old); + sleeps.set(sleeps.size() - 1, old.get(old.size() - 1).wake(now)); + return Collections.unmodifiableList(sleeps); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("counts"); + for (Map.Entry<String, Long> count : counts.entrySet()) { + builder.field(count.getKey(), count.getValue()); + } + builder.endObject(); + toXContent(builder, params, "first", first); + toXContent(builder, params, "last", last); + return builder.endObject(); + } + + private static void toXContent(XContentBuilder builder, ToXContent.Params params, String name, List<Sleep> sleeps) throws IOException { + builder.startArray(name); + for (Sleep sleep : sleeps) { + sleep.toXContent(builder, params); + } + builder.endArray(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java index c7a0c7d4bacb9..42e3908231206 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java @@ -79,6 +79,8 @@ public class DriverStatus implements Task.Status { */ private final List<OperatorStatus> activeOperators; + private final DriverSleeps sleeps; + DriverStatus( String sessionId, long started, @@ -87,7 +89,8 @@ public class DriverStatus implements Task.Status { long iterations, Status status, List<OperatorStatus> completedOperators, - List<OperatorStatus> activeOperators + List<OperatorStatus> activeOperators, + DriverSleeps sleeps ) { this.sessionId = sessionId; this.started = started; @@ -97,6 +100,7 @@ public class DriverStatus implements Task.Status { this.status = status; this.completedOperators = completedOperators; this.activeOperators = activeOperators; + this.sleeps = sleeps; } public DriverStatus(StreamInput in) throws IOException { @@ -105,13 +109,14 @@ public DriverStatus(StreamInput in) throws IOException { this.lastUpdated = in.readLong(); this.cpuNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0; this.iterations = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ?
in.readVLong() : 0; - this.status = Status.valueOf(in.readString()); + this.status = Status.read(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.completedOperators = in.readCollectionAsImmutableList(OperatorStatus::new); } else { this.completedOperators = List.of(); } this.activeOperators = in.readCollectionAsImmutableList(OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override @@ -125,11 +130,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cpuNanos); out.writeVLong(iterations); } - out.writeString(status.toString()); + status.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeCollection(completedOperators); } out.writeCollection(activeOperators); + sleeps.writeTo(out); } @Override @@ -188,6 +194,13 @@ public List completedOperators() { return completedOperators; } + /** + * Records of the times the driver has slept. + */ + public DriverSleeps sleeps() { + return sleeps; + } + /** * Status of each active {@link Operator} in the driver. */ @@ -206,7 +219,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cpu_time", TimeValue.timeValueNanos(cpuNanos)); } builder.field("iterations", iterations); - builder.field("status", status.toString().toLowerCase(Locale.ROOT)); + builder.field("status", status, params); builder.startArray("completed_operators"); for (OperatorStatus completed : completedOperators) { builder.value(completed); @@ -217,6 +230,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(active); } builder.endArray(); + builder.field("sleeps", sleeps, params); return builder.endObject(); } @@ -232,12 +246,13 @@ public boolean equals(Object o) { && iterations == that.iterations && status == that.status && completedOperators.equals(that.completedOperators) - && activeOperators.equals(that.activeOperators); + && activeOperators.equals(that.activeOperators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override @@ -313,7 +328,7 @@ public String toString() { } } - public enum Status implements ToXContentFragment { + public enum Status implements Writeable, ToXContentFragment { QUEUED, STARTING, RUNNING, @@ -321,6 +336,15 @@ public enum Status implements ToXContentFragment { WAITING, DONE; + public static Status read(StreamInput in) throws IOException { + return Status.valueOf(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(toString()); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.value(toString().toLowerCase(Locale.ROOT)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java new file mode 100644 index 0000000000000..9e9c64dfbfed4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.action.support.SubscribableListener; + +import java.util.Map; + +/** + * Is this {@link Operator} blocked? + * <p> + * If the {@link #listener}'s {@link SubscribableListener#isDone()} method + * returns {@code true} then the {@linkplain Operator} is not blocked. + * </p> + * <p> + * If the {@linkplain Operator} is blocked then you can + * {@link SubscribableListener#addListener} to the {@link #listener} to be + * notified when the {@linkplain Operator} is unblocked. + * </p> + * @param listener a listener to check for blocked-ness + * @param reason the reason that the {@linkplain Operator} is blocked. + * This is used as a {@link Map} key so this shouldn't + * vary wildly, but it should be descriptive of the reason + * the operator went async. + */ +public record IsBlockedResult(SubscribableListener<Void> listener, String reason) {} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java index 1038277c39fe1..663e06756551b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java @@ -88,11 +88,11 @@ default Status status() { * If the operator is not blocked, this method returns {@link #NOT_BLOCKED} which is an already * completed future. */ - default SubscribableListener<Void> isBlocked() { + default IsBlockedResult isBlocked() { return NOT_BLOCKED; } - SubscribableListener<Void> NOT_BLOCKED = SubscribableListener.newSucceeded(null); + IsBlockedResult NOT_BLOCKED = new IsBlockedResult(SubscribableListener.newSucceeded(null), "not blocked"); /** * A factory for creating intermediate operators. diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java index df6c09ea1ff97..ce400ddbdd6f9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import java.util.Queue; @@ -83,7 +84,7 @@ private void notifyNotFull() { } } - SubscribableListener<Void> waitForWriting() { + IsBlockedResult waitForWriting() { // maxBufferSize check is not water-tight as more than one sink can pass this check at the same time.
if (queueSize.get() < maxSize || noMoreInputs) { return Operator.NOT_BLOCKED; @@ -95,11 +96,11 @@ SubscribableListener waitForWriting() { if (notFullFuture == null) { notFullFuture = new SubscribableListener<>(); } - return notFullFuture; + return new IsBlockedResult(notFullFuture, "exchange full"); } } - SubscribableListener waitForReading() { + IsBlockedResult waitForReading() { if (size() > 0 || noMoreInputs) { return Operator.NOT_BLOCKED; } @@ -110,7 +111,7 @@ SubscribableListener waitForReading() { if (notEmptyFuture == null) { notEmptyFuture = new SubscribableListener<>(); } - return notEmptyFuture; + return new IsBlockedResult(notEmptyFuture, "exchange empty"); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java index 8f0208740b689..e96ca9e39b7e5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Sink for exchanging data @@ -33,5 +33,5 @@ public interface ExchangeSink { /** * Whether the sink is blocked on adding more pages */ - SubscribableListener waitForWriting(); + IsBlockedResult waitForWriting(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index ab155d6ee8479..757a3262433c8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -81,7 +82,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForWriting() { + public IsBlockedResult waitForWriting() { return buffer.waitForWriting(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java index 01354d681017a..dd89dfe480c36 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SinkOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -65,13 +65,13 @@ public void finish() { } @Override - public SubscribableListener isBlocked() { + public IsBlockedResult isBlocked() { return sink.waitForWriting(); } @Override public boolean needsInput() { - return isFinished() == false && isBlocked().isDone(); + return isFinished() == false && isBlocked().listener().isDone(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java index 01ed5e3fb6388..aa3374aa26d3f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Source for exchanging data @@ -38,5 +38,5 @@ public interface ExchangeSource { /** * Allows callers to stop reading from the source when it's blocked */ - SubscribableListener waitForReading(); + IsBlockedResult waitForReading(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 77b535949eb9d..406dc4494208c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.FailureCollector; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.core.Releasable; import java.util.List; @@ -70,7 +71,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForReading() { + public IsBlockedResult waitForReading() { return buffer.waitForReading(); } @@ -178,13 +179,13 @@ void fetchPage() { if (resp.finished()) { onSinkComplete(); } else { - SubscribableListener future = buffer.waitForWriting(); - if (future.isDone()) { + IsBlockedResult future = buffer.waitForWriting(); + if (future.listener().isDone()) { if (loopControl.tryResume() == false) { fetchPage(); } } else { - future.addListener(ActionListener.wrap(unused -> { + future.listener().addListener(ActionListener.wrap(unused -> { if (loopControl.tryResume() == false) { fetchPage(); } @@ -198,7 +199,7 @@ void fetchPage() { void onSinkFailed(Exception e) { failure.unwrapAndCollect(e); - buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading + buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java index 1efba31bd831b..2d0ce228e81df 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,7 +30,7 @@ public class ExchangeSourceOperator extends SourceOperator { private final ExchangeSource source; - private SubscribableListener isBlocked = NOT_BLOCKED; + private IsBlockedResult isBlocked = NOT_BLOCKED; private int pagesEmitted; public record ExchangeSourceOperatorFactory(Supplier exchangeSources) implements SourceOperatorFactory { @@ -70,10 +70,10 @@ public void finish() { } @Override - public SubscribableListener isBlocked() { - if (isBlocked.isDone()) { + public IsBlockedResult isBlocked() { + if (isBlocked.listener().isDone()) { isBlocked = source.waitForReading(); - if (isBlocked.isDone()) { + if (isBlocked.listener().isDone()) { isBlocked = NOT_BLOCKED; } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index ae4558d5f8f71..fbcf11cd948c0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -159,49 +159,56 @@ public void doClose() { Releasables.close(localBreaker); } - public void testStatus() { - BlockFactory blockFactory = blockFactory(); - DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + class TestOp extends AsyncOperator { Map> handlers = new HashMap<>(); - AsyncOperator operator = new AsyncOperator(driverContext, 2) { - @Override - protected void performAsync(Page inputPage, ActionListener listener) { - handlers.put(inputPage, listener); - } - @Override - protected void doClose() { + TestOp(DriverContext driverContext, int maxOutstandingRequests) { + super(driverContext, maxOutstandingRequests); + } - } - }; - assertTrue(operator.isBlocked().isDone()); + @Override + protected void performAsync(Page inputPage, ActionListener listener) { + handlers.put(inputPage, listener); + } + + @Override + protected void doClose() { + + } + } + + public void testStatus() { + BlockFactory blockFactory = blockFactory(); + DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + TestOp operator = new TestOp(driverContext, 2); + 
assertTrue(operator.isBlocked().listener().isDone()); assertTrue(operator.needsInput()); Page page1 = new Page(driverContext.blockFactory().newConstantNullBlock(1)); operator.addInput(page1); - assertFalse(operator.isBlocked().isDone()); - SubscribableListener blocked1 = operator.isBlocked(); + assertFalse(operator.isBlocked().listener().isDone()); + SubscribableListener blocked1 = operator.isBlocked().listener(); assertTrue(operator.needsInput()); Page page2 = new Page(driverContext.blockFactory().newConstantNullBlock(2)); operator.addInput(page2); assertFalse(operator.needsInput()); // reached the max outstanding requests - assertFalse(operator.isBlocked().isDone()); - assertThat(operator.isBlocked(), equalTo(blocked1)); + assertFalse(operator.isBlocked().listener().isDone()); + assertThat(operator.isBlocked(), equalTo(new IsBlockedResult(blocked1, "TestOp"))); Page page3 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page1).onResponse(page3); + operator.handlers.remove(page1).onResponse(page3); page1.releaseBlocks(); assertFalse(operator.needsInput()); // still have 2 outstanding requests - assertTrue(operator.isBlocked().isDone()); + assertTrue(operator.isBlocked().listener().isDone()); assertTrue(blocked1.isDone()); assertThat(operator.getOutput(), equalTo(page3)); page3.releaseBlocks(); assertTrue(operator.needsInput()); - assertFalse(operator.isBlocked().isDone()); + assertFalse(operator.isBlocked().listener().isDone()); Page page4 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page2).onResponse(page4); + operator.handlers.remove(page2).onResponse(page4); page2.releaseBlocks(); assertThat(operator.getOutput(), equalTo(page4)); page4.releaseBlocks(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index 86655bd3b7f73..27083ea0fcd13 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -20,22 +20,34 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; public class DriverProfileTests extends AbstractWireSerializingTestCase { public void testToXContent() { DriverProfile status = new DriverProfile( + 123413220000L, + 123413243214L, 10012, 10000, 12, List.of( new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) + ), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { + "start" : "1973-11-29T09:27:00.000Z", + "start_millis" : 123413220000, + "stop" : "1973-11-29T09:27:23.214Z", + "stop_millis" : 123413243214, "took_nanos" : 10012, "took_time" : "10micros", "cpu_nanos" : 10000, @@ -54,7 +66,30 @@ public void testToXContent() { """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" 
: "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -69,24 +104,33 @@ protected DriverProfile createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - DriverStatusTests.randomOperatorStatuses() + randomNonNegativeLong(), + randomNonNegativeLong(), + DriverStatusTests.randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @Override protected DriverProfile mutateInstance(DriverProfile instance) throws IOException { + long startMillis = instance.startMillis(); + long stopMillis = instance.stopMillis(); long tookNanos = instance.tookNanos(); long cpuNanos = instance.cpuNanos(); long iterations = instance.iterations(); var operators = instance.operators(); - switch (between(0, 3)) { - case 0 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); - case 1 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); - case 2 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); - case 3 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + var sleeps = instance.sleeps(); + switch (between(0, 6)) { + case 0 -> startMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 1 -> stopMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 2 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); + case 3 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); + case 4 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); + case 5 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + case 6 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverProfile(tookNanos, cpuNanos, iterations, operators); + return new DriverProfile(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java new file mode 100644 index 0000000000000..a0d956fcd6f6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java @@ -0,0 +1,240 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class DriverSleepsTests extends AbstractWireSerializingTestCase { + public static DriverSleeps randomDriverSleeps() { + return randomDriverSleeps(between(0, DriverSleeps.RECORDS * 3)); + } + + private static DriverSleeps randomDriverSleeps(int cycles) { + DriverSleeps sleeps = DriverSleeps.empty(); + long now = 0; + for (int i = 0; i < cycles; i++) { + now += between(1, 100000); + sleeps = sleeps.sleep(randomSleepReason(), now); + if (i != cycles - 1 || randomBoolean()) { + // Randomly don't wake on the last sleep + now += between(1, 100000); + sleeps = sleeps.wake(now); + } + } + return sleeps; + } + + private static String randomSleepReason() { + return randomFrom("driver time", "driver iteration", "exchange empty", "exchange full"); + } + + public void testEmptyToXContent() { + assertThat(Strings.toString(DriverSleeps.empty(), true, true), equalTo(""" + { + "counts" : { }, + "first" : [ ], + "last" : [ ] + }""")); + } + + public void testSleepingToXContent() { + assertThat(Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L), true, true), equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000 + } + ] + }""")); + } + + public void testWakingToXContent() { + assertThat( + Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L).wake(1723555863000L), true, true), + equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ] + }""") + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return DriverSleeps::read; + } + + @Override + protected DriverSleeps createTestInstance() { + return randomDriverSleeps(); + } + + @Override + protected DriverSleeps mutateInstance(DriverSleeps instance) throws IOException { + if (instance.last().isEmpty()) { + return instance.sleep(randomSleepReason(), between(1, 10000)); + } + DriverSleeps.Sleep last = instance.last().get(instance.last().size() - 1); + if (last.isStillSleeping()) { + return instance.wake(last.sleep() + between(1, 10000)); + } + return instance.sleep(randomSleepReason(), last.wake() + between(1, 10000)); + } + + public void testTracking() throws IOException { + long now = 0; + DriverSleeps sleeps = DriverSleeps.empty(); + + Map expectedCounts = new TreeMap<>(); + List expectedFirst = new 
ArrayList<>(); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + + /* + * Simulate sleeping and waking when the records aren't full. + * New sleeps and wakes should show up in both the "first" and "last" fields. + */ + for (int i = 0; i < DriverSleeps.RECORDS; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedFirst.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + + now++; + sleeps = sleeps.wake(now); + expectedFirst.set(expectedFirst.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + } + + /* + * Simulate sleeping and waking when the records are full. + * New sleeps and wakes should show up in only the "last" field. + */ + List expectedLast = new ArrayList<>(expectedFirst); + for (int i = 0; i < 1000; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedLast.remove(0); + expectedLast.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + + now++; + sleeps = sleeps.wake(now); + expectedLast.set(expectedLast.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + } + } + + public void assertXContent( + DriverSleeps sleeps, + Map expectedCounts, + List expectedFirst, + List expectedLast + ) throws IOException { + try (BytesStreamOutput expected = new BytesStreamOutput()) { + try (XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), expected).prettyPrint().humanReadable(true)) { + b.startObject(); + b.startObject("counts"); + { + for (Map.Entry e : expectedCounts.entrySet()) { + b.field(e.getKey(), e.getValue()); + } + } + b.endObject(); + { + b.startArray("first"); + for (DriverSleeps.Sleep sleep : expectedFirst) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + { + b.startArray("last"); + for (DriverSleeps.Sleep sleep : expectedLast) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + b.endObject(); + } + assertThat(Strings.toString(sleeps, true, true), equalTo(expected.bytes().utf8ToString())); + } + } + + public void testWakeNeverSlept() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().wake(1)); + assertThat(e.getMessage(), equalTo("Never slept.")); + } + + public void testWakeWhileAwake() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).wake(2).wake(3)); + assertThat(e.getMessage(), equalTo("Already awake.")); + } + + public void testSleepWhileSleeping() { + Exception e = expectThrows( + IllegalStateException.class, + () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).sleep(randomSleepReason(), 2) + ); + assertThat(e.getMessage(), equalTo("Still sleeping.")); + } +} diff 
--git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index e82cbb831cff2..b46d9f3f4add7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +41,12 @@ public void testToXContent() { new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) ), - List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())) + List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) + ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { @@ -72,7 +78,30 @@ public void testToXContent() { """.stripTrailing() + " " + ExchangeSinkOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -91,7 +120,8 @@ protected DriverStatus createTestInstance() { randomNonNegativeLong(), randomStatus(), randomOperatorStatuses(), - randomOperatorStatuses() + randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @@ -127,7 +157,8 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException var status = instance.status(); var completedOperators = instance.completedOperators(); var activeOperators = instance.activeOperators(); - switch (between(0, 7)) { + var sleeps = instance.sleeps(); + switch (between(0, 8)) { case 0 -> sessionId = randomValueOtherThan(sessionId, this::randomSessionId); case 1 -> started = randomValueOtherThan(started, ESTestCase::randomNonNegativeLong); case 2 -> lastUpdated = randomValueOtherThan(lastUpdated, ESTestCase::randomNonNegativeLong); @@ -136,9 +167,10 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException case 5 -> status = randomValueOtherThan(status, this::randomStatus); case 6 -> completedOperators = randomValueOtherThan(completedOperators, DriverStatusTests::randomOperatorStatuses); case 7 -> activeOperators = randomValueOtherThan(activeOperators, DriverStatusTests::randomOperatorStatuses); + case 8 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 3f958464656e0..ab785e739d080 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -99,40 +99,40 @@ public void testBasic() throws Exception { sourceExchanger.addCompletionListener(sourceCompletion); ExchangeSource source = sourceExchanger.createExchangeSource(); sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); - SubscribableListener waitForReading = source.waitForReading(); + SubscribableListener waitForReading = source.waitForReading().listener(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); - assertTrue(sink1.waitForWriting().isDone()); + assertTrue(sink1.waitForWriting().listener().isDone()); randomFrom(sink1, sink2).addPage(pages[0]); randomFrom(sink1, sink2).addPage(pages[1]); // source and sink buffers can store 5 pages for (Page p : List.of(pages[2], pages[3], pages[4])) { ExchangeSink sink = randomFrom(sink1, sink2); - assertBusy(() -> assertTrue(sink.waitForWriting().isDone())); + assertBusy(() -> assertTrue(sink.waitForWriting().listener().isDone())); sink.addPage(p); } // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[0], source.pollPage()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[1], source.pollPage()); // sink can write again - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[5]); - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[6]); // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); sink1.finish(); assertTrue(sink1.isFinished()); for (int i = 0; i < 5; i++) { - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[2 + i], source.pollPage()); } // source buffer is empty - assertFalse(source.waitForReading().isDone()); - assertBusy(() -> assertTrue(sink2.waitForWriting().isDone())); + assertFalse(source.waitForReading().listener().isDone()); + assertBusy(() -> assertTrue(sink2.waitForWriting().listener().isDone())); sink2.finish(); assertTrue(sink2.isFinished()); assertTrue(source.isFinished()); @@ -356,13 +356,13 @@ public void testEarlyTerminate() { ExchangeSink sink = sinkExchanger.createExchangeSink(); sink.addPage(p1); sink.addPage(p2); - assertFalse(sink.waitForWriting().isDone()); + assertFalse(sink.waitForWriting().listener().isDone()); PlainActionFuture future = new 
PlainActionFuture<>(); sinkExchanger.fetchPageAsync(true, future); ExchangeResponse resp = future.actionGet(); assertTrue(resp.finished()); assertNull(resp.takePage()); - assertTrue(sink.waitForWriting().isDone()); + assertTrue(sink.waitForWriting().listener().isDone()); assertTrue(sink.isFinished()); } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index d679ee18d0a73..b0fa233965da6 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.elasticsearch.Build; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -21,15 +22,19 @@ import org.elasticsearch.test.TestClustersThreadFilter; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.hamcrest.Matchers; +import org.junit.Assert; import org.junit.ClassRule; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -285,15 +290,11 @@ public void testProfile() throws IOException { .entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -348,15 +349,11 @@ public void testInlineStatsProfile() throws IOException { ).entry("values", values).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -398,6 +395,115 @@ public void testInlineStatsProfile() throws IOException { ); } + public void testForceSleepsProfile() throws IOException { + assumeTrue("requires pragmas", Build.current().isSnapshot()); + + Request createIndex = new Request("PUT", testIndexName()); + 
createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": 1 + } + } + }"""); + Response response = client().performRequest(createIndex); + assertThat( + entityToMap(response.getEntity(), XContentType.JSON), + matchesMap().entry("shards_acknowledged", true).entry("index", testIndexName()).entry("acknowledged", true) + ); + + int groupCount = 300; + for (int group1 = 0; group1 < groupCount; group1++) { + StringBuilder b = new StringBuilder(); + for (int group2 = 0; group2 < groupCount; group2++) { + b.append(String.format(Locale.ROOT, """ + {"create":{"_index":"%s"}} + {"@timestamp":"2020-12-12","value":1,"group1":%d,"group2":%d} + """, testIndexName(), group1, group2)); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + bulk.setJsonEntity(b.toString()); + response = client().performRequest(bulk); + Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + } + + RequestObjectBuilder builder = requestObjectBuilder().query( + fromIndex() + " | STATS AVG(value), MAX(value), MIN(value) BY group1, group2 | SORT group1, group2 ASC | LIMIT 10" + ); + // Lock to shard level partitioning, so we get consistent profile output + builder.pragmas(Settings.builder().put("data_partitioning", "shard").put("page_size", 10).build()); + builder.profile(true); + Map result = runEsql(builder); + List> expectedValues = new ArrayList<>(); + for (int group2 = 0; group2 < 10; group2++) { + expectedValues.add(List.of(1.0, 1, 1, 0, group2)); + } + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double")) + .item(matchesMap().entry("name", "MAX(value)").entry("type", "long")) + .item(matchesMap().entry("name", "MIN(value)").entry("type", "long")) + .item(matchesMap().entry("name", "group1").entry("type", "long")) + .item(matchesMap().entry("name", "group2").entry("type", "long")) + ).entry("values", expectedValues).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) + ); + + @SuppressWarnings("unchecked") + List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); + + for (Map p : profiles) { + assertMap(p, commonProfile()); + @SuppressWarnings("unchecked") + Map sleeps = (Map) p.get("sleeps"); + String operators = p.get("operators").toString(); + MapMatcher sleepMatcher = matchesMap().entry("reason", "exchange empty") + .entry("sleep_millis", greaterThan(0L)) + .entry("wake_millis", greaterThan(0L)); + if (operators.contains("LuceneSourceOperator")) { + assertMap(sleeps, matchesMap().entry("counts", Map.of()).entry("first", List.of()).entry("last", List.of())); + } else if (operators.contains("ExchangeSourceOperator")) { + if (operators.contains("ExchangeSinkOperator")) { + assertMap(sleeps, matchesMap().entry("counts", matchesMap().entry("exchange empty", greaterThan(0))).extraOk()); + @SuppressWarnings("unchecked") + List> first = (List>) sleeps.get("first"); + for (Map s : first) { + assertMap(s, sleepMatcher); + } + @SuppressWarnings("unchecked") + List> last = (List>) sleeps.get("last"); + for (Map s : last) { + assertMap(s, sleepMatcher); + } + + } else { + assertMap( + sleeps, + matchesMap().entry("counts", matchesMap().entry("exchange empty", 1)) + .entry("first", List.of(sleepMatcher)) + .entry("last", List.of(sleepMatcher)) + ); + } + } else { + fail("unknown signature: " + operators); + } + } + } + + private MapMatcher 
commonProfile() { + return matchesMap().entry("start_millis", greaterThan(0L)) + .entry("stop_millis", greaterThan(0L)) + .entry("iterations", greaterThan(0)) + .entry("cpu_nanos", greaterThan(0)) + .entry("took_nanos", greaterThan(0)) + .entry("operators", instanceOf(List.class)) + .entry("sleeps", matchesMap().extraOk()); + } + private String checkOperatorProfile(Map o) { String name = (String) o.get("operator"); name = name.replaceAll("\\[.+", ""); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index 782e1fb4333d8..2f3aa09868637 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -51,7 +52,10 @@ private DriverProfile randomDriverProfile() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomList(10, this::randomOperatorStatus) + randomNonNegativeLong(), + randomNonNegativeLong(), + randomList(10, this::randomOperatorStatus), + DriverSleeps.empty() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index e7f539026498b..9d4a1c21c5995 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; @@ -479,10 +480,13 @@ public void testProfileXContent() { new EsqlQueryResponse.Profile( List.of( new DriverProfile( + 1723489812649L, + 1723489819929L, 20021, 20000, 12, - List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))) + List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))), + DriverSleeps.empty() ) ) ), @@ -509,6 +513,8 @@ public void testProfileXContent() { "profile" : { "drivers" : [ { + "start_millis" : 1723489812649, + "stop_millis" : 1723489819929, "took_nanos" : 20021, "cpu_nanos" : 20000, "iterations" : 12, @@ -520,7 +526,12 @@ public void testProfileXContent() { "pages_processed" : 10 } } - ] + ], + "sleeps" : { + "counts" : { }, + "first" : [ ], + "last" : [ ] + } } ] } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index c93f3b9e0e350..26529a3605d38 100644 
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java
@@ -16,6 +16,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.compute.operator.DriverProfile;
+import org.elasticsearch.compute.operator.DriverSleeps;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.TaskCancellationService;
@@ -92,7 +93,17 @@ private ComputeResponse randomResponse() {
int numProfiles = randomIntBetween(0, 2);
List profiles = new ArrayList<>(numProfiles);
for (int i = 0; i < numProfiles; i++) {
- profiles.add(new DriverProfile(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), List.of()));
+ profiles.add(
+ new DriverProfile(
+ randomNonNegativeLong(),
+ randomNonNegativeLong(),
+ randomNonNegativeLong(),
+ randomNonNegativeLong(),
+ randomNonNegativeLong(),
+ List.of(),
+ DriverSleeps.empty()
+ )
+ );
}
return new ComputeResponse(profiles);
}

From 69293e28dc6d3237796ada6d12c75c84c73a1a29 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Mon, 19 Aug 2024 16:31:59 -0700
Subject: [PATCH 081/389] Use systemd socket directly instead of libsystemd (#111131)

The libsystemd library function sd_notify is just a thin wrapper around
opening and writing to a unix filesystem socket. This commit replaces using
libsystemd with opening the socket provided by systemd directly.

relates #86475
---
 .../nativeaccess/jna/JnaPosixCLibrary.java    | 41 ++++++++++
 .../nativeaccess/LinuxNativeAccess.java       | 11 ++-
 .../elasticsearch/nativeaccess/Systemd.java   | 81 ++++++++++++++++---
 .../nativeaccess/lib/PosixCLibrary.java       | 59 +++++++++++++-
 .../nativeaccess/jdk/JdkPosixCLibrary.java    | 64 +++++++++++++++
 .../nativeaccess/jdk/MemorySegmentUtil.java   |  4 +
 .../nativeaccess/jdk/MemorySegmentUtil.java   |  4 +
 7 files changed, 248 insertions(+), 16 deletions(-)

diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java
index d984d239e0b39..82a69e4864d94 100644
--- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java
+++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java
@@ -16,6 +16,7 @@
import com.sun.jna.Pointer;
import com.sun.jna.Structure;

+import org.elasticsearch.nativeaccess.CloseableByteBuffer;
import org.elasticsearch.nativeaccess.lib.PosixCLibrary;

import java.util.Arrays;
@@ -109,6 +110,16 @@ public long bytesalloc() {
}
}

+ public static class JnaSockAddr implements SockAddr {
+ final Memory memory;
+
+ JnaSockAddr(String path) {
+ this.memory = new Memory(110);
+ memory.setShort(0, AF_UNIX);
+ memory.setString(2, path, "UTF-8");
+ }
+ }
+
private interface NativeFunctions extends Library {
int geteuid();

@@ -126,6 +137,12 @@ private interface NativeFunctions extends Library {

int close(int fd);

+ int socket(int domain, int type, int protocol);
+
+ int connect(int sockfd, Pointer addr, int addrlen);
+
+ long send(int sockfd, Pointer buf, long buflen, int flags);
+
String strerror(int errno);
}

@@ -235,6 +252,30 @@ public int fstat64(int fd, Stat64 stats) {
return fstat64.fstat64(fd, jnaStats.memory);
}

+ @Override
+ public int socket(int domain, int type, int protocol) {
+ return functions.socket(domain,
type, protocol); + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JnaSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JnaSockAddr; + var jnaAddr = (JnaSockAddr) addr; + return functions.connect(sockfd, jnaAddr.memory, (int) jnaAddr.memory.size()); + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JnaCloseableByteBuffer; + var nativeBuffer = (JnaCloseableByteBuffer) buffer; + return functions.send(sockfd, nativeBuffer.memory, nativeBuffer.buffer().remaining(), flags); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index f6e6035a8aba6..e1ea28e8786f5 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -12,7 +12,7 @@ import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Map; @@ -92,7 +92,14 @@ record Arch( LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64)); this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); - this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); + String socketPath = System.getenv("NOTIFY_SOCKET"); + if (socketPath == null) { + this.systemd = null; // not running under systemd + } else { + logger.debug("Systemd socket path: {}", socketPath); + var buffer = newBuffer(64); + this.systemd = new Systemd(libraryProvider.getLibrary(PosixCLibrary.class), socketPath, buffer); + } } @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java index 4deade118b788..058cfe77b1ff3 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java @@ -10,17 +10,28 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import java.util.Locale; +import java.nio.charset.StandardCharsets; +/** + * Wraps access to notifications to systemd. + *
+ * Systemd notifications are done through a Unix socket. Although Java does support + * opening unix sockets, it unfortunately does not support datagram sockets. This class + * instead opens and communicates with the socket using native methods. + */ public class Systemd { private static final Logger logger = LogManager.getLogger(Systemd.class); - private final SystemdLibrary lib; + private final PosixCLibrary libc; + private final String socketPath; + private final CloseableByteBuffer buffer; - Systemd(SystemdLibrary lib) { - this.lib = lib; + Systemd(PosixCLibrary libc, String socketPath, CloseableByteBuffer buffer) { + this.libc = libc; + this.socketPath = socketPath; + this.buffer = buffer; } /** @@ -41,15 +52,61 @@ public void notify_stopping() { } private void notify(String state, boolean warnOnError) { - int rc = lib.sd_notify(0, state); - logger.trace("sd_notify({}, {}) returned [{}]", 0, state, rc); - if (rc < 0) { - String message = String.format(Locale.ROOT, "sd_notify(%d, %s) returned error [%d]", 0, state, rc); - if (warnOnError) { - logger.warn(message); + int sockfd = libc.socket(PosixCLibrary.AF_UNIX, PosixCLibrary.SOCK_DGRAM, 0); + if (sockfd < 0) { + throwOrLog("Could not open systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + RuntimeException error = null; + try { + var sockAddr = libc.newUnixSockAddr(socketPath); + if (libc.connect(sockfd, sockAddr) != 0) { + throwOrLog("Could not connect to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + + byte[] bytes = state.getBytes(StandardCharsets.US_ASCII); + final long bytesSent; + synchronized (buffer) { + buffer.buffer().clear(); + buffer.buffer().put(0, bytes); + buffer.buffer().limit(bytes.length); + bytesSent = libc.send(sockfd, buffer, 0); + } + + if (bytesSent == -1) { + throwOrLog("Failed to send message (" + state + ") to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } else if (bytesSent != bytes.length) { + throwOrLog("Not all bytes of message (" + state + ") sent to systemd socket (sent " + bytesSent + ")", warnOnError); } else { - throw new RuntimeException(message); + logger.trace("Message (" + state + ") sent to systemd"); + } + } catch (RuntimeException e) { + error = e; + } finally { + if (libc.close(sockfd) != 0) { + try { + throwOrLog("Could not close systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } catch (RuntimeException e) { + if (error != null) { + error.addSuppressed(e); + throw error; + } else { + throw e; + } + } + } else if (error != null) { + throw error; } } } + + private void throwOrLog(String message, boolean warnOnError) { + if (warnOnError) { + logger.warn(message); + } else { + logger.error(message); + throw new RuntimeException(message); + } + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 0e7d07d0ad623..ac34fcb23b3eb 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -8,11 +8,19 @@ package org.elasticsearch.nativeaccess.lib; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + /** * Provides access to methods in libc.so available on POSIX systems. 
*/
public non-sealed interface PosixCLibrary extends NativeLibrary {

+ /** socket domain indicating unix file socket */
+ short AF_UNIX = 1;
+
+ /** socket type indicating a datagram-oriented socket */
+ int SOCK_DGRAM = 2;
+
/**
* Gets the effective userid of the current process.
*
@@ -68,8 +76,6 @@ interface Stat64 {
int open(String pathname, int flags);

- int close(int fd);
-
int fstat64(int fd, Stat64 stats);

int ftruncate(int fd, long length);
@@ -90,6 +96,55 @@ interface FStore {

int fcntl(int fd, int cmd, FStore fst);

+ /**
+ * Open a file descriptor to connect to a socket.
+ *
+ * @param domain The socket protocol family, eg AF_UNIX
+ * @param type The socket type, eg SOCK_DGRAM
+ * @param protocol The protocol for the given protocol family, normally 0
+ * @return an open file descriptor, or -1 on failure with errno set
+ * @see socket manpage
+ */
+ int socket(int domain, int type, int protocol);
+
+ /**
+ * Marker interface for sockaddr struct implementations.
+ */
+ interface SockAddr {}
+
+ /**
+ * Create a sockaddr for the AF_UNIX family.
+ */
+ SockAddr newUnixSockAddr(String path);
+
+ /**
+ * Connect a socket to an address.
+ *
+ * @param sockfd An open socket file descriptor
+ * @param addr The address to connect to
+ * @return 0 on success, -1 on failure with errno set
+ */
+ int connect(int sockfd, SockAddr addr);
+
+ /**
+ * Send a message to a socket.
+ *
+ * @param sockfd The open socket file descriptor
+ * @param buffer The message bytes to send
+ * @param flags Flags that may adjust how the message is sent
+ * @return The number of bytes sent, or -1 on failure with errno set
+ * @see send manpage
+ */
+ long send(int sockfd, CloseableByteBuffer buffer, int flags);
+
+ /**
+ * Close a file descriptor.
+ * @param fd The file descriptor to close
+ * @return 0 on success, -1 on failure with errno set
+ * @see close manpage
+ */
+ int close(int fd);
+
/**
* Return a string description for an error.
* diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 7affd0614461d..f5e3132b76b56 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -10,6 +10,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.lang.foreign.Arena; @@ -24,8 +25,10 @@ import static java.lang.foreign.MemoryLayout.PathElement.groupElement; import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; import static java.lang.foreign.ValueLayout.JAVA_INT; import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; import static org.elasticsearch.nativeaccess.jdk.MemorySegmentUtil.varHandleWithoutOffset; @@ -89,6 +92,18 @@ class JdkPosixCLibrary implements PosixCLibrary { } fstat$mh = fstat; } + private static final MethodHandle socket$mh = downcallHandleWithErrno( + "socket", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_INT) + ); + private static final MethodHandle connect$mh = downcallHandleWithErrno( + "connect", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS, JAVA_INT) + ); + private static final MethodHandle send$mh = downcallHandleWithErrno( + "send", + FunctionDescriptor.of(JAVA_LONG, JAVA_INT, ADDRESS, JAVA_LONG, JAVA_INT) + ); static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -226,6 +241,44 @@ public int fstat64(int fd, Stat64 stat64) { } } + @Override + public int socket(int domain, int type, int protocol) { + try { + return (int) socket$mh.invokeExact(errnoState, domain, type, protocol); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JdkSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JdkSockAddr; + var jdkAddr = (JdkSockAddr) addr; + try { + return (int) connect$mh.invokeExact(errnoState, sockfd, jdkAddr.segment, (int) jdkAddr.segment.byteSize()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JdkCloseableByteBuffer; + var nativeBuffer = (JdkCloseableByteBuffer) buffer; + var segment = nativeBuffer.segment; + try { + logger.info("Sending {} bytes to socket", buffer.buffer().remaining()); + return (long) send$mh.invokeExact(errnoState, sockfd, segment, (long) buffer.buffer().remaining(), flags); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -326,4 +379,15 @@ public long bytesalloc() { return (long) st_bytesalloc$vh.get(segment); } } + + private static class JdkSockAddr implements SockAddr { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, MemoryLayout.sequenceLayout(108, JAVA_BYTE)); + 
final MemorySegment segment; + + JdkSockAddr(String path) { + segment = Arena.ofAuto().allocate(layout); + segment.set(JAVA_SHORT, 0, AF_UNIX); + MemorySegmentUtil.setString(segment, 2, path); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index c65711af0f63f..6c4c9bd0111c0 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -22,6 +22,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getUtf8String(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setUtf8String(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateUtf8String(s); } diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 25c449337e294..23d9919603ab4 100644 --- a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -20,6 +20,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getString(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setString(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateFrom(s); } From 1047453b1a287046407eb8c7c84bba85c2312beb Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 07:24:21 +0100 Subject: [PATCH 082/389] Improve interrupt handling in tests (#111957) The test utilities `waitUntil()`, `indexRandom()`, `startInParallel()` and `runInParallel()` all declare `InterruptedException` amongst the checked exceptions they throw, but in practice there's nothing useful to do with such an exception except to fail the test. With this change we handle the interrupt within the utility methods instead, avoiding exception-handling noise in callers. 
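
For illustration only (this sketch is not part of the commit), the pattern
being applied looks roughly like this: swallow the checked exception inside
the utility, restore the thread's interrupt flag so outer layers can still
observe it, and fail the test outright. The helper below is hypothetical,
written in the spirit of the `safeAwait`/`safeSleep` utilities this diff
calls into:

    import java.util.concurrent.CountDownLatch;

    class InterruptHandlingSketch {
        // Hypothetical helper: callers no longer declare InterruptedException;
        // an unexpected interrupt simply fails the test.
        static void safeAwait(CountDownLatch latch) {
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // keep the interrupt visible
                throw new AssertionError("unexpected interrupt while awaiting latch", e);
            }
        }
    }

Restoring the flag before throwing matters because framework code further up
the stack may still consult Thread.currentThread().isInterrupted().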
--- .../node/tasks/CancellableTasksTests.java | 14 ++- .../cluster/node/tasks/TestTaskPlugin.java | 20 ++--- .../elasticsearch/test/ESIntegTestCase.java | 48 +++++------ .../org/elasticsearch/test/ESTestCase.java | 41 +++++---- .../test/InternalTestCluster.java | 6 +- .../SearchableSnapshotsIntegTests.java | 8 +- .../SessionFactoryLoadBalancingTests.java | 85 +++++++++---------- 7 files changed, 100 insertions(+), 122 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index e541fef65a0f9..64b9b4f0b69d8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -158,15 +158,11 @@ protected NodeResponse nodeOperation(CancellableNodeRequest request, Task task) if (shouldBlock) { // Simulate a job that takes forever to finish // Using periodic checks method to identify that the task was cancelled - try { - waitUntil(() -> { - ((CancellableTask) task).ensureNotCancelled(); - return false; - }); - fail("It should have thrown an exception"); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ((CancellableTask) task).ensureNotCancelled(); + return false; + }); + fail("It should have thrown an exception"); } debugDelay("op4"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 16392b3f59baa..903ecfe2b2aa7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -283,16 +283,12 @@ protected void doExecute(Task task, NodesRequest request, ActionListener { - if (((CancellableTask) task).isCancelled()) { - throw new RuntimeException("Cancelled!"); - } - return ((TestTask) task).isBlocked() == false; - }); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + if (((CancellableTask) task).isCancelled()) { + throw new RuntimeException("Cancelled!"); + } + return ((TestTask) task).isBlocked() == false; + }); } logger.info("Test task finished on the node {}", clusterService.localNode()); return new NodeResponse(clusterService.localNode()); @@ -301,9 +297,7 @@ protected NodeResponse nodeOperation(NodeRequest request, Task task) { public static class UnblockTestTaskResponse implements Writeable { - UnblockTestTaskResponse() { - - } + UnblockTestTaskResponse() {} UnblockTestTaskResponse(StreamInput in) {} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fa686a0bc753a..cf469546b6f63 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1192,23 +1192,19 @@ public static List> findTasks(Cl @Nullable public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalCluster) { DiscoveryNode[] healthNode = new DiscoveryNode[1]; - try { - waitUntil(() -> { - ClusterState state = internalCluster.client() - .admin() - .cluster() - .prepareState() - .clear() - 
.setMetadata(true) - .setNodes(true) - .get() - .getState(); - healthNode[0] = HealthNode.findHealthNode(state); - return healthNode[0] != null; - }, 15, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ClusterState state = internalCluster.client() + .admin() + .cluster() + .prepareState() + .clear() + .setMetadata(true) + .setNodes(true) + .get() + .getState(); + healthNode[0] = HealthNode.findHealthNode(state); + return healthNode[0] != null; + }, 15, TimeUnit.SECONDS); return healthNode[0]; } @@ -1640,7 +1636,7 @@ protected static IndicesAdminClient indicesAdmin() { return admin().indices(); } - public void indexRandom(boolean forceRefresh, String index, int numDocs) throws InterruptedException { + public void indexRandom(boolean forceRefresh, String index, int numDocs) { IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { builders[i] = prepareIndex(index).setSource("field", "value"); @@ -1651,11 +1647,11 @@ public void indexRandom(boolean forceRefresh, String index, int numDocs) throws /** * Convenience method that forwards to {@link #indexRandom(boolean, List)}. */ - public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) { indexRandom(forceRefresh, Arrays.asList(builders)); } - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) { indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders)); } @@ -1674,7 +1670,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexReque * @param builders the documents to index. * @see #indexRandom(boolean, boolean, java.util.List) */ - public void indexRandom(boolean forceRefresh, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, List builders) { indexRandom(forceRefresh, forceRefresh, builders); } @@ -1690,7 +1686,7 @@ public void indexRandom(boolean forceRefresh, List builders * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. */ - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) { indexRandom(forceRefresh, dummyDocuments, true, builders); } @@ -1707,8 +1703,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) - throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List builders) { Random random = random(); Set indices = new HashSet<>(); builders = new ArrayList<>(builders); @@ -1822,8 +1817,7 @@ private static CountDownLatch newLatch(List latches) { /** * Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations. 
*/ - private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) - throws InterruptedException { + private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) { if (rarely()) { if (rarely()) { indicesAdmin().prepareRefresh(indices) @@ -1843,7 +1837,7 @@ private void postIndexAsyncActions(String[] indices, List inFlig } while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) { int waitFor = between(0, inFlightAsyncOperations.size() - 1); - inFlightAsyncOperations.remove(waitFor).await(); + safeAwait(inFlightAsyncOperations.remove(waitFor)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 08709ff6459ce..58487d6552bcd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -213,6 +213,7 @@ import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.startsWith; /** @@ -1420,9 +1421,8 @@ public static void assertBusy(CheckedRunnable codeBlock, long maxWait * * @param breakSupplier determines whether to return immediately or continue waiting. * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. */ - public static boolean waitUntil(BooleanSupplier breakSupplier) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier) { return waitUntil(breakSupplier, 10, TimeUnit.SECONDS); } @@ -1438,9 +1438,8 @@ public static boolean waitUntil(BooleanSupplier breakSupplier) throws Interrupte * @param maxWaitTime the maximum amount of time to wait * @param unit the unit of tie for maxWaitTime * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. */ - public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) { long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit); long timeInMillis = 1; long sum = 0; @@ -1448,12 +1447,12 @@ public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, if (breakSupplier.getAsBoolean()) { return true; } - Thread.sleep(timeInMillis); + safeSleep(timeInMillis); sum += timeInMillis; timeInMillis = Math.min(AWAIT_BUSY_THRESHOLD, timeInMillis * 2); } timeInMillis = maxTimeInMillis - sum; - Thread.sleep(Math.max(timeInMillis, 0)); + safeSleep(Math.max(timeInMillis, 0)); return breakSupplier.getAsBoolean(); } @@ -2505,7 +2504,7 @@ public static T expectThrows(Class expectedType, Reques * Same as {@link #runInParallel(int, IntConsumer)} but also attempts to start all tasks at the same time by blocking execution on a * barrier until all threads are started and ready to execute their task. 
*/ - public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) { final CyclicBarrier barrier = new CyclicBarrier(numberOfTasks); runInParallel(numberOfTasks, i -> { safeAwait(barrier); @@ -2519,7 +2518,7 @@ public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) t * @param numberOfTasks number of tasks to run in parallel * @param taskFactory task factory */ - public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) { final ArrayList> futures = new ArrayList<>(numberOfTasks); final Thread[] threads = new Thread[numberOfTasks - 1]; for (int i = 0; i < numberOfTasks; i++) { @@ -2534,16 +2533,26 @@ public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) thr threads[i].start(); } } - for (Thread thread : threads) { - thread.join(); - } Exception e = null; - for (Future future : futures) { - try { - future.get(); - } catch (Exception ex) { - e = ExceptionsHelper.useOrSuppress(e, ex); + try { + for (Thread thread : threads) { + // no sense in waiting for the rest of the threads, nor any futures, if interrupted, just bail out and fail + thread.join(); + } + for (Future future : futures) { + try { + future.get(); + } catch (InterruptedException interruptedException) { + // no sense in waiting for the rest of the futures if interrupted, just bail out and fail + Thread.currentThread().interrupt(); + throw interruptedException; + } catch (Exception executionException) { + e = ExceptionsHelper.useOrSuppress(e, executionException); + } } + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + e = ExceptionsHelper.useOrSuppress(e, interruptedException); } if (e != null) { throw new AssertionError(e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0b69245177c7a..332df7123fd1b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1744,11 +1744,7 @@ private synchronized void startAndPublishNodesAndClients(List nod .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters .count(); rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start - try { - runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); - } catch (InterruptedException e) { - throw new AssertionError("interrupted while starting nodes", e); - } + runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); nodeAndClients.forEach(this::publishNode); if (autoManageMasterNodes && newMasters > 0) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 56aec13cbab29..c99f2be0a6cad 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -371,12 +371,8 @@ public void testCanMountSnapshotTakenWhileConcurrentlyIndexing() throws Exceptio for (int i = between(10, 10_000); i >= 0; i--) { indexRequestBuilders.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? "bar" : "baz")); } - try { - safeAwait(cyclicBarrier); - indexRandom(true, true, indexRequestBuilders); - } catch (InterruptedException e) { - throw new AssertionError(e); - } + safeAwait(cyclicBarrier); + indexRandom(true, true, indexRequestBuilders); refresh(indexName); assertThat( indicesAdmin().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index 466d0e3428d50..6abf6c81b673e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -401,59 +401,52 @@ private PortBlockingRunnable( public void run() { final List openedSockets = new ArrayList<>(); final List failedAddresses = new ArrayList<>(); - try { - final boolean allSocketsOpened = waitUntil(() -> { - try { - final InetAddress[] allAddresses; - if (serverAddress instanceof Inet4Address) { - allAddresses = NetworkUtils.getAllIPV4Addresses(); - } else { - allAddresses = NetworkUtils.getAllIPV6Addresses(); - } - final List inetAddressesToBind = Arrays.stream(allAddresses) - .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) - .filter(addr -> failedAddresses.contains(addr) == false) - .collect(Collectors.toList()); - for (InetAddress localAddress : inetAddressesToBind) { - try { - final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); - openedSockets.add(socket); - logger.debug("opened socket [{}]", socket); - } catch (NoRouteToHostException | ConnectException e) { - logger.debug(() -> "marking address [" + localAddress + "] as failed due to:", e); - failedAddresses.add(localAddress); - } - } - if (openedSockets.size() == 0) { - logger.debug("Could not open any sockets from the available addresses"); - return false; + + final boolean allSocketsOpened = waitUntil(() -> { + try { + final InetAddress[] allAddresses; + if (serverAddress instanceof Inet4Address) { + allAddresses = NetworkUtils.getAllIPV4Addresses(); + } else { + allAddresses = NetworkUtils.getAllIPV6Addresses(); + } + final List inetAddressesToBind = Arrays.stream(allAddresses) + .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) + .filter(addr -> failedAddresses.contains(addr) == false) + .collect(Collectors.toList()); + for (InetAddress localAddress : inetAddressesToBind) { + try { + final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); + openedSockets.add(socket); + logger.debug("opened socket [{}]", socket); + } catch (NoRouteToHostException | ConnectException e) { + logger.debug(() -> "marking address [" + localAddress + "] as failed due to:", e); + failedAddresses.add(localAddress); } - return true; - } catch (IOException e) 
{ - logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + } + if (openedSockets.size() == 0) { + logger.debug("Could not open any sockets from the available addresses"); return false; } - }); - - if (allSocketsOpened) { - latch.countDown(); - } else { - success.set(false); - IOUtils.closeWhileHandlingException(openedSockets); - openedSockets.clear(); - latch.countDown(); - return; + return true; + } catch (IOException e) { + logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + return false; } - } catch (InterruptedException e) { - logger.debug(() -> "interrupted while trying to open sockets on [" + portToBind + "]", e); - Thread.currentThread().interrupt(); + }); + + if (allSocketsOpened) { + latch.countDown(); + } else { + success.set(false); + IOUtils.closeWhileHandlingException(openedSockets); + openedSockets.clear(); + latch.countDown(); + return; } try { - closeLatch.await(); - } catch (InterruptedException e) { - logger.debug("caught exception while waiting for close latch", e); - Thread.currentThread().interrupt(); + safeAwait(closeLatch); } finally { logger.debug("closing sockets on [{}]", portToBind); IOUtils.closeWhileHandlingException(openedSockets); From fa58a9d08d9696b0a19ce10cb44bba8cf752a5fb Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 07:25:55 +0100 Subject: [PATCH 083/389] Add known issue docs for #111854 (#111978) --- docs/reference/api-conventions.asciidoc | 1 + docs/reference/release-notes/8.15.0.asciidoc | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 25881b707d724..f8d925945401e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -334,6 +334,7 @@ All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. +[[api-conventions-number-values]] [discrete] === Number Values diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index e2314381a4b06..2069c1bd96ff0 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -22,6 +22,10 @@ Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the recommendation in the manual to entirely disable swap instead of using the memory lock feature (issue: {es-issue}111847[#111847]) +* The `took` field of the response to the <> API is incorrect and may be rather large. Clients which +<> assume that this value will be within a particular range (e.g. 
that it fits into a 32-bit +signed integer) may encounter errors (issue: {es-issue}111854[#111854]) + [[breaking-8.15.0]] [float] === Breaking changes From c80b79678935ea62af676b7431fedb0af9bcb7ba Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 20 Aug 2024 09:37:02 +0300 Subject: [PATCH 084/389] ESQL: don't lose the original casting error message (#111968) --- docs/changelog/111968.yaml | 6 ++++++ .../xpack/esql/analysis/Analyzer.java | 3 +++ .../xpack/esql/analysis/VerifierTests.java | 20 +++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 docs/changelog/111968.yaml diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml new file mode 100644 index 0000000000000..9d758c76369e9 --- /dev/null +++ b/docs/changelog/111968.yaml @@ -0,0 +1,6 @@ +pr: 111968 +summary: "ESQL: don't lose the original casting error message" +area: ES|QL +type: bug +issues: + - 111967 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4a7120a1d3d92..4a116fd102cd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -856,6 +856,9 @@ private static List potentialCandidatesIfNoMatchesFound( Collection attrList, java.util.function.Function, String> messageProducer ) { + if (ua.customMessage()) { + return List.of(); + } // none found - add error message if (matches.isEmpty()) { Set names = new HashSet<>(attrList.size()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 9b0c32b8ade2e..ab216e10b674c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -255,10 +255,30 @@ public void testRoundFunctionInvalidInputs() { "1:31: second argument of [round(a, 3.5)] must be [integer], found value [3.5] type [double]", error("row a = 1, b = \"c\" | eval x = round(a, 3.5)") ); + } + + public void testImplicitCastingErrorMessages() { assertEquals( "1:23: Cannot convert string [c] to [INTEGER], error [Cannot parse number [c]]", error("row a = round(123.45, \"c\")") ); + assertEquals( + "1:27: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]", + error("row a = 1 | eval x = acos(\"c\")") + ); + assertEquals( + "1:33: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]\n" + + "line 1:38: Cannot convert string [a] to [INTEGER], error [Cannot parse number [a]]", + error("row a = 1 | eval x = round(acos(\"c\"),\"a\")") + ); + assertEquals( + "1:63: Cannot convert string [x] to [INTEGER], error [Cannot parse number [x]]", + error("row ip4 = to_ip(\"1.2.3.4\") | eval ip4_prefix = ip_prefix(ip4, \"x\", 0)") + ); + assertEquals( + "1:42: Cannot convert string [a] to [DOUBLE], error [Cannot parse number [a]]", + error("ROW a=[3, 5, 1, 6] | EVAL avg_a = MV_AVG(\"a\")") + ); } public void testAggsExpressionsInStatsAggs() { From ad90d1f0f62499c4ce1e31915db6cd6cc750106f Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Tue, 20 Aug 2024 09:54:55 +0300 Subject: [PATCH 085/389] Introduce global retention in data stream lifecycle (cluster settings) (#111972) In this PR we introduce cluster settings to 
manage the global data stream retention. We introduce two settings, `data_streams.lifecycle.retention.max` and `data_streams.lifecycle.retention.default`, that configure the respective retentions. The settings are loaded and monitored by the `DataStreamGlobalRetentionSettings`; the validation has also moved there. We preserved the `DataStreamGlobalRetention` record to reduce the impact of this change: the purpose of this record is now simply to be a wrapper that groups the retention settings together. Temporarily, the `DataStreamGlobalRetentionSettings` uses the `DataStreamFactoryRetention`, which is marked as deprecated, for migration purposes. --- docs/changelog/111972.yaml | 15 ++ .../data-stream-lifecycle-settings.asciidoc | 12 ++ .../datastreams/DataStreamsPlugin.java | 2 +- .../action/GetDataStreamsTransportAction.java | 14 +- .../lifecycle/DataStreamLifecycleService.java | 12 +- ...sportExplainDataStreamLifecycleAction.java | 10 +- ...TransportGetDataStreamLifecycleAction.java | 10 +- .../MetadataIndexTemplateServiceTests.java | 7 +- .../GetDataStreamsTransportActionTests.java | 45 ++--- .../DataStreamLifecycleServiceTests.java | 9 +- .../metadata/DataStreamFactoryRetention.java | 2 + .../metadata/DataStreamGlobalRetention.java | 6 +- .../DataStreamGlobalRetentionProvider.java | 34 ---- .../DataStreamGlobalRetentionSettings.java | 180 ++++++++++++++++++ .../metadata/MetadataDataStreamsService.java | 8 +- .../MetadataIndexTemplateService.java | 12 +- .../common/settings/ClusterSettings.java | 5 +- .../elasticsearch/node/NodeConstruction.java | 27 +-- .../org/elasticsearch/plugins/Plugin.java | 6 +- ...vedComposableIndexTemplateActionTests.java | 14 +- ...ataStreamGlobalRetentionProviderTests.java | 58 ------ ...ataStreamGlobalRetentionSettingsTests.java | 141 ++++++++++++++ ...amLifecycleWithRetentionWarningsTests.java | 40 ++-- .../MetadataDataStreamsServiceTests.java | 6 +- .../MetadataIndexTemplateServiceTests.java | 14 +- 25 files changed, 476 insertions(+), 213 deletions(-) create mode 100644 docs/changelog/111972.yaml delete mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java delete mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml new file mode 100644 index 0000000000000..58477c68f0e7c --- /dev/null +++ b/docs/changelog/111972.yaml @@ -0,0 +1,15 @@ +pr: 111972 +summary: Introduce global retention in data stream lifecycle. +area: Data streams +type: feature +issues: [] +highlight: + title: Add global retention in data stream lifecycle + body: "Data stream lifecycle now supports configuring retention on a cluster level,\ + \ namely global retention. Global retention \nallows us to configure two different\ + \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ + \ data streams managed by the data stream lifecycle that do not have retention\n\ + defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ + \ applied to all data streams managed by the data stream lifecycle and it allows\ + \ any data stream \ndata to be deleted after the `max_retention` has passed."
+ notable: true diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc index 0f00e956472d0..4b055525d4e6c 100644 --- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -10,6 +10,18 @@ These are the settings available for configuring <>, +[[data-streams-lifecycle-retention-max]] +`data_streams.lifecycle.retention.max`:: +(<>, <>) +The maximum retention period that will apply to all user data streams managed by the data stream lifecycle. The max retention will also +override the retention of a data stream whose configured retention exceeds the max retention. It should be greater than `10s`. + +[[data-streams-lifecycle-retention-default]] +`data_streams.lifecycle.retention.default`:: +(<>, <>) +The retention period that will apply to all user data streams managed by the data stream lifecycle that do not have retention configured. +It should be greater than `10s` and less than or equal to <>. + [[data-streams-lifecycle-poll-interval]] `data_streams.lifecycle.poll_interval`:: (<>, <>) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index cd233e29dee0e..615c0006a4ce6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -201,7 +201,7 @@ public Collection createComponents(PluginServices services) { errorStoreInitialisationService.get(), services.allocationService(), dataStreamLifecycleErrorsPublisher.get(), - services.dataStreamGlobalRetentionProvider() + services.dataStreamGlobalRetentionSettings() ) ); dataLifecycleInitialisationService.get().init(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index b32ba361963e5..dcca32355082b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -57,7 +57,7 @@ public class GetDataStreamsTransportAction extends TransportMasterNodeReadAction private static final Logger LOGGER = LogManager.getLogger(GetDataStreamsTransportAction.class); private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public GetDataStreamsTransportAction( @@ -67,7 +67,7 @@ public GetDataStreamsTransportAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, -
DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamAction.NAME, @@ -81,7 +81,7 @@ public GetDataStreamsTransportAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.systemIndices = systemIndices; - this.dataStreamGlobalRetentionProvider = dataStreamGlobalRetentionProvider; + this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); } @@ -93,7 +93,7 @@ protected void masterOperation( ActionListener listener ) throws Exception { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, dataStreamGlobalRetentionProvider) + innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings) ); } @@ -103,7 +103,7 @@ static GetDataStreamAction.Response innerOperation( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, ClusterSettings clusterSettings, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); @@ -223,7 +223,7 @@ public int compareTo(IndexInfo o) { return new GetDataStreamAction.Response( dataStreamInfos, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - dataStreamGlobalRetentionProvider.provide() + globalRetentionSettings.get() ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 9e1b01ef47a88..0cb29dbcf5b2f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -44,7 +44,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -162,7 +162,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab final ResultDeduplicator transportActionsDeduplicator; final ResultDeduplicator clusterStateChangesDeduplicator; private final DataStreamLifecycleHealthInfoPublisher dslHealthInfoPublisher; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -211,7 +211,7 @@ public DataStreamLifecycleService( DataStreamLifecycleErrorStore errorStore, AllocationService allocationService, DataStreamLifecycleHealthInfoPublisher dataStreamLifecycleHealthInfoPublisher, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings 
globalRetentionSettings ) { this.settings = settings; this.client = client; @@ -222,7 +222,7 @@ public DataStreamLifecycleService( this.clusterStateChangesDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext()); this.nowSupplier = nowSupplier; this.errorStore = errorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; this.scheduledJob = null; this.pollInterval = DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); this.targetMergePolicyFloorSegment = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(settings); @@ -819,7 +819,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo RolloverRequest rolloverRequest = getDefaultRolloverRequest( rolloverConfiguration, dataStream.getName(), - dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionResolver.provide()), + dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionSettings.get()), rolloverFailureStore ); transportActionsDeduplicator.executeOnce( @@ -871,7 +871,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo */ Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { Metadata metadata = state.metadata(); - DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionResolver.provide(); + DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionSettings.get(); List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier, globalRetention); if (backingIndicesOlderThanRetention.isEmpty()) { return Set.of(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java index 408bc3b239f23..855b1713e5ec2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -44,7 +44,7 @@ public class TransportExplainDataStreamLifecycleAction extends TransportMasterNo ExplainDataStreamLifecycleAction.Response> { private final DataStreamLifecycleErrorStore errorStore; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportExplainDataStreamLifecycleAction( @@ -54,7 +54,7 @@ public TransportExplainDataStreamLifecycleAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DataStreamLifecycleErrorStore dataLifecycleServiceErrorStore, - 
DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( ExplainDataStreamLifecycleAction.INSTANCE.name(), @@ -68,7 +68,7 @@ public TransportExplainDataStreamLifecycleAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.errorStore = dataLifecycleServiceErrorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -118,7 +118,7 @@ protected void masterOperation( new ExplainDataStreamLifecycleAction.Response( explainIndices, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java index 3def1351dd5e8..452295aab0ce9 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +40,7 @@ public class TransportGetDataStreamLifecycleAction extends TransportMasterNodeRe GetDataStreamLifecycleAction.Request, GetDataStreamLifecycleAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportGetDataStreamLifecycleAction( @@ -49,7 +49,7 @@ public TransportGetDataStreamLifecycleAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamLifecycleAction.INSTANCE.name(), @@ -63,7 +63,7 @@ public TransportGetDataStreamLifecycleAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -96,7 +96,7 @@ protected void masterOperation( .sorted(Comparator.comparing(GetDataStreamLifecycleAction.Response.DataStreamLifecycle::dataStreamName)) .toList(), request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index b61b70f55c734..d5356e371f497 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -216,7 +216,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, indexSettingProviders, - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java index cd3f862a51ddf..80d867ec7745e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -45,7 +45,8 @@ public class GetDataStreamsTransportActionTests extends ESTestCase { private final IndexNameExpressionResolver resolver = TestIndexNameExpressionResolver.newInstance(); private final SystemIndices systemIndices = new SystemIndices(List.of()); - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -165,7 +166,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); 
assertThat( response.getDataStreams(), @@ -195,7 +196,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -245,7 +246,7 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -288,7 +289,7 @@ public void testGetTimeSeriesMixedDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); var name1 = DataStream.getDefaultBackingIndexName("ds-1", 1, instant.toEpochMilli()); @@ -333,30 +334,24 @@ public void testPassingGlobalRetention() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), nullValue()); DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention( TimeValue.timeValueDays(randomIntBetween(1, 5)), TimeValue.timeValueDays(randomIntBetween(5, 10)) ); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProviderWithSettings = new DataStreamGlobalRetentionProvider( - new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return globalRetention.maxRetention(); - } - - @Override - public TimeValue getDefaultRetention() { - return globalRetention.defaultRetention(); - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - } + DataStreamGlobalRetentionSettings withGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + globalRetention.defaultRetention() + ) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), globalRetention.maxRetention()) + .build() + ), + DataStreamFactoryRetention.emptyFactoryRetention() ); response = GetDataStreamsTransportAction.innerOperation( state, @@ -364,7 +359,7 @@ public void init(ClusterSettings clusterSettings) { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProviderWithSettings + withGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 77b4d5f21529b..8cb27fd9fd282 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling.Round; @@ -138,7 +138,8 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private List clientSeenRequests; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -187,7 +188,7 @@ public void setupServices() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1426,7 +1427,7 @@ public void testTrackingTimeStats() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); assertThat(service.getTimeBetweenStarts(), is(nullValue())); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 5b96f92193e98..be42916b07956 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -17,7 +17,9 @@ * Holds the factory retention configuration. Factory retention is the global retention configuration meant to be * used if a user hasn't provided other retention configuration via {@link DataStreamGlobalRetention} metadata in the * cluster state. + * @deprecated This interface is deprecated, please use {@link DataStreamGlobalRetentionSettings}. */ +@Deprecated public interface DataStreamFactoryRetention { @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java index c74daa22cc137..185f625f6f91f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -18,14 +18,10 @@ import java.io.IOException; /** - * A cluster state entry that contains global retention settings that are configurable by the user. These settings include: - * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined - * - max retention, applied on every data stream managed by DSL + * Wrapper class for the {@link DataStreamGlobalRetentionSettings}. 
*/ public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable { - public static final String TYPE = "data-stream-global-retention"; - public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention"); public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java deleted file mode 100644 index f1e3e18ea4d51..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.core.Nullable; - -/** - * Provides the global retention configuration for data stream lifecycle as defined in the settings. - */ -public class DataStreamGlobalRetentionProvider { - - private final DataStreamFactoryRetention factoryRetention; - - public DataStreamGlobalRetentionProvider(DataStreamFactoryRetention factoryRetention) { - this.factoryRetention = factoryRetention; - } - - /** - * Return the global retention configuration as defined in the settings. If both settings are null, it returns null. - */ - @Nullable - public DataStreamGlobalRetention provide() { - if (factoryRetention.isDefined() == false) { - return null; - } - return new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention()); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java new file mode 100644 index 0000000000000..a1fcf56a92726 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This class holds the data stream global retention settings. It defines, validates and monitors the settings. + *
<p>
+ * The global retention settings apply to non-system data streams that are managed by the data stream lifecycle. They consist of: + * - The default retention which applies to data streams that do not have a retention defined. + * - The max retention which applies to all data streams that do not have retention or their retention has exceeded this value. + *
<p>
+ * Temporarily, we fall back to {@link DataStreamFactoryRetention} to facilitate a smooth transition to these settings. + */ +public class DataStreamGlobalRetentionSettings { + + private static final Logger logger = LogManager.getLogger(DataStreamGlobalRetentionSettings.class); + public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); + + public static final Setting DATA_STREAMS_DEFAULT_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.default", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull(settingValue); + TimeValue maxRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_MAX_RETENTION_SETTING)); + validateIsolatedRetentionValue(defaultRetention, DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_MAX_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting DATA_STREAMS_MAX_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.max", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_DEFAULT_RETENTION_SETTING)); + TimeValue maxRetention = getSettingValueOrNull(settingValue); + validateIsolatedRetentionValue(maxRetention, DATA_STREAMS_MAX_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_DEFAULT_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final DataStreamFactoryRetention factoryRetention; + + @Nullable + private volatile TimeValue defaultRetention; + @Nullable + private volatile TimeValue maxRetention; + + private DataStreamGlobalRetentionSettings(DataStreamFactoryRetention factoryRetention) { + this.factoryRetention = factoryRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return shouldFallbackToFactorySettings() ? factoryRetention.getMaxRetention() : maxRetention; + } + + @Nullable + public TimeValue getDefaultRetention() { + return shouldFallbackToFactorySettings() ? 
factoryRetention.getDefaultRetention() : defaultRetention; + } + + public boolean areDefined() { + return getDefaultRetention() != null || getMaxRetention() != null; + } + + private boolean shouldFallbackToFactorySettings() { + return defaultRetention == null && maxRetention == null; + } + + /** + * Creates an instance and initialises the cluster settings listeners + * @param clusterSettings it will register the cluster settings listeners to monitor for changes + * @param factoryRetention for migration purposes, it will be removed shortly + */ + public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings, DataStreamFactoryRetention factoryRetention) { + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings(factoryRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_DEFAULT_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setDefaultRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_MAX_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setMaxRetention); + return dataStreamGlobalRetentionSettings; + } + + private void setMaxRetention(TimeValue maxRetention) { + this.maxRetention = getSettingValueOrNull(maxRetention); + logger.info("Updated max factory retention to [{}]", this.maxRetention == null ? null : maxRetention.getStringRep()); + } + + private void setDefaultRetention(TimeValue defaultRetention) { + this.defaultRetention = getSettingValueOrNull(defaultRetention); + logger.info("Updated default factory retention to [{}]", this.defaultRetention == null ? null : defaultRetention.getStringRep()); + } + + private static void validateIsolatedRetentionValue(@Nullable TimeValue retention, String settingName) { + if (retention != null && retention.getMillis() < MIN_RETENTION_VALUE.getMillis()) { + throw new IllegalArgumentException( + "Setting '" + settingName + "' should be greater than " + MIN_RETENTION_VALUE.getStringRep() + ); + } + } + + private static void validateGlobalRetentionConfiguration(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Setting [" + + DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey() + + "=" + + defaultRetention.getStringRep() + + "] cannot be greater than [" + + DATA_STREAMS_MAX_RETENTION_SETTING.getKey() + + "=" + + maxRetention.getStringRep() + + "]." + ); + } + } + + @Nullable + public DataStreamGlobalRetention get() { + if (areDefined() == false) { + return null; + } + return new DataStreamGlobalRetention(getDefaultRetention(), getMaxRetention()); + } + + /** + * Time value settings do not accept null as a value. To represent an undefined retention as a setting we use the value + * of -1 and this method converts this to null. + * + * @param value the retention as parsed from the setting + * @return the value when it is not -1 and null otherwise + */ + @Nullable + private static TimeValue getSettingValueOrNull(TimeValue value) { + return value == null || value.equals(TimeValue.MINUS_ONE) ? 
null : value; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index bfe7468b97a64..9cac6fa3e8796 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -41,18 +41,18 @@ public class MetadataDataStreamsService { private final ClusterService clusterService; private final IndicesService indicesService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private final MasterServiceTaskQueue updateLifecycleTaskQueue; private final MasterServiceTaskQueue setRolloverOnWriteTaskQueue; public MetadataDataStreamsService( ClusterService clusterService, IndicesService indicesService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.indicesService = indicesService; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; ClusterStateTaskExecutor updateLifecycleExecutor = new SimpleBatchedAckListenerTaskExecutor<>() { @Override @@ -223,7 +223,7 @@ ClusterState updateDataLifecycle(ClusterState currentState, List dataStr if (lifecycle != null) { if (atLeastOneDataStreamIsNotSystem) { // We don't issue any warnings if all data streams are system data streams - lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } } return ClusterState.builder(currentState).metadata(builder.build()).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index c6eb56926eca0..ac56f3f670f43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -137,7 +137,7 @@ public class MetadataIndexTemplateService { private final NamedXContentRegistry xContentRegistry; private final SystemIndices systemIndices; private final Set indexSettingProviders; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; /** * This is the cluster state task executor for all template-based actions. 
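For illustration, here is how the `DataStreamGlobalRetentionSettings` holder defined above can be constructed and read. This is a minimal sketch mirroring the test wiring shown earlier in this patch (`GetDataStreamsTransportActionTests`); the class and method names come from the diff, while the `30d`/`90d` values and the `GlobalRetentionSketch` wrapper class are illustrative assumptions:

[source,java]
----
import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

class GlobalRetentionSketch {
    static DataStreamGlobalRetention resolve() {
        // Register the two new dynamic settings with illustrative values.
        ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(
            Settings.builder()
                .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), "30d")
                .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), "90d")
                .build()
        );
        // create() registers the listeners that keep the holder in sync with setting updates.
        DataStreamGlobalRetentionSettings retentionSettings = DataStreamGlobalRetentionSettings.create(
            clusterSettings,
            DataStreamFactoryRetention.emptyFactoryRetention()
        );
        // get() returns null when neither retention is defined, otherwise the wrapper record.
        return retentionSettings.get();
    }
}
----

Per the validators in the new class, each value must be at least `10s` and the default must not exceed the max; an update violating either constraint is rejected with an `IllegalArgumentException`.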
@@ -183,7 +183,7 @@ public MetadataIndexTemplateService( NamedXContentRegistry xContentRegistry, SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.taskQueue = clusterService.createTaskQueue("index-templates", Priority.URGENT, TEMPLATE_TASK_EXECUTOR); @@ -193,7 +193,7 @@ public MetadataIndexTemplateService( this.xContentRegistry = xContentRegistry; this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } public void removeTemplates( @@ -345,7 +345,7 @@ public ClusterState addComponentTemplate( tempStateWithComponentTemplateAdded.metadata(), composableTemplateName, composableTemplate, - globalRetentionResolver.provide() + globalRetentionSettings.get() ); validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded); } catch (Exception e) { @@ -369,7 +369,7 @@ public ClusterState addComponentTemplate( } if (finalComponentTemplate.template().lifecycle() != null) { - finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } logger.info("{} component template [{}]", existing == null ? "adding" : "updating", name); @@ -730,7 +730,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); - validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionResolver.provide()); + validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionSettings.get()); if (templateToValidate.isDeprecated() == false) { validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates()); diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d5f770ebb95fc..c023b00ec820f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.coordination.MasterHistory; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.coordination.Reconfigurator; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.Metadata; @@ -598,6 +599,8 @@ public void apply(Settings value, Settings current, Settings previous) { TDigestExecutionHint.SETTING, MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING, - TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE + TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE, + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING, + DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING 
).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 27a82cf6a2501..a4db9a0a0e149 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -42,7 +42,7 @@ import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.features.NodeFeaturesFixupListener; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; @@ -588,25 +588,27 @@ private ScriptService createScriptService(SettingsModule settingsModule, ThreadP return scriptService; } - private DataStreamGlobalRetentionProvider createDataStreamServicesAndGlobalRetentionResolver( + private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalRetentionResolver( + Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, MetadataCreateIndexService metadataCreateIndexService ) { - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterService.getClusterSettings(), DataStreamFactoryRetention.load(pluginsService, clusterService.getClusterSettings()) ); - modules.bindToInstance(DataStreamGlobalRetentionProvider.class, dataStreamGlobalRetentionProvider); + modules.bindToInstance(DataStreamGlobalRetentionSettings.class, dataStreamGlobalRetentionSettings); modules.bindToInstance( MetadataCreateDataStreamService.class, new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) ); modules.bindToInstance( MetadataDataStreamsService.class, - new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionProvider) + new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionSettings) ); - return dataStreamGlobalRetentionProvider; + return dataStreamGlobalRetentionSettings; } private UpdateHelper createUpdateHelper(DocumentParsingProvider documentParsingProvider, ScriptService scriptService) { @@ -815,7 +817,8 @@ private void construct( threadPool ); - final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = createDataStreamServicesAndGlobalRetentionResolver( + final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = createDataStreamServicesAndGlobalRetentionResolver( + settings, threadPool, clusterService, indicesService, @@ -840,7 +843,7 @@ record PluginServiceInstances( IndicesService indicesService, FeatureService featureService, SystemIndices systemIndices, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider, + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings, DocumentParsingProvider documentParsingProvider ) implements Plugin.PluginServices {} PluginServiceInstances pluginServices = new PluginServiceInstances( @@ -861,7 +864,7 @@ record PluginServiceInstances( indicesService, 
featureService, systemIndices, - dataStreamGlobalRetentionProvider, + dataStreamGlobalRetentionSettings, documentParsingProvider ); @@ -895,7 +898,7 @@ record PluginServiceInstances( systemIndices, indexSettingProviders, metadataCreateIndexService, - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ), pluginsService.loadSingletonServiceProvider(RestExtension.class, RestExtension::allowAll) ); @@ -1465,7 +1468,7 @@ private List> buildReservedStateHandlers( SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, MetadataCreateIndexService metadataCreateIndexService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List> reservedStateHandlers = new ArrayList<>(); @@ -1480,7 +1483,7 @@ private List> buildReservedStateHandlers( xContentRegistry, systemIndices, indexSettingProviders, - globalRetentionResolver + globalRetentionSettings ); reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 1815f4403019f..a8bfda54b0646 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -10,7 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.routing.RerouteService; @@ -156,10 +156,10 @@ public interface PluginServices { SystemIndices systemIndices(); /** - * A service that resolves the data stream global retention that applies to + * A service that holds the data stream global retention settings that applies to * data streams managed by the data stream lifecycle. 
*/ - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider(); + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings(); /** * A provider of utilities to observe and report parsing of documents diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java index b2a29e2bcfeb7..32a74fef61209 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -75,7 +76,7 @@ public class ReservedComposableIndexTemplateActionTests extends ESTestCase { ClusterService clusterService; IndexScopedSettings indexScopedSettings; IndicesService indicesService; - private DataStreamGlobalRetentionProvider globalRetentionResolver; + private DataStreamGlobalRetentionSettings globalRetentionSettings; @Before public void setup() throws IOException { @@ -92,7 +93,10 @@ public void setup() throws IOException { doReturn(mapperService).when(indexService).mapperService(); doReturn(indexService).when(indicesService).createIndex(any(), any(), anyBoolean()); - globalRetentionResolver = new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()); + globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); templateService = new MetadataIndexTemplateService( clusterService, mock(MetadataCreateIndexService.class), @@ -101,7 +105,7 @@ public void setup() throws IOException { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + globalRetentionSettings ); } @@ -896,7 +900,7 @@ public void testTemplatesWithReservedPrefix() throws Exception { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + globalRetentionSettings ); ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).metadata(metadata).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java deleted 
file mode 100644 index f22664ea5b7d0..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class DataStreamGlobalRetentionProviderTests extends ESTestCase { - - public void testOnlyFactoryRetentionFallback() { - DataStreamFactoryRetention factoryRetention = randomNonEmptyFactoryRetention(); - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider(factoryRetention); - DataStreamGlobalRetention globalRetention = resolver.provide(); - assertThat(globalRetention, notNullValue()); - assertThat(globalRetention.defaultRetention(), equalTo(factoryRetention.getDefaultRetention())); - assertThat(globalRetention.maxRetention(), equalTo(factoryRetention.getMaxRetention())); - } - - private static DataStreamFactoryRetention randomNonEmptyFactoryRetention() { - boolean withDefault = randomBoolean(); - TimeValue defaultRetention = withDefault ? TimeValue.timeValueDays(randomIntBetween(10, 20)) : null; - TimeValue maxRetention = withDefault && randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(50, 200)); - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return maxRetention; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } - - public void testNoRetentionConfiguration() { - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); - assertThat(resolver.provide(), nullValue()); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java new file mode 100644 index 0000000000000..78184fd7568e5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamGlobalRetentionSettingsTests extends ESTestCase { + + public void testDefaults() { + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + assertThat(globalRetentionSettings.getDefaultRetention(), nullValue()); + assertThat(globalRetentionSettings.getMaxRetention(), nullValue()); + + // Fallback to factory settings + TimeValue maxFactoryValue = randomPositiveTimeValue(); + TimeValue defaultFactoryValue = randomPositiveTimeValue(); + DataStreamGlobalRetentionSettings withFactorySettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + new DataStreamFactoryRetention() { + @Override + public TimeValue getMaxRetention() { + return maxFactoryValue; + } + + @Override + public TimeValue getDefaultRetention() { + return defaultFactoryValue; + } + + @Override + public void init(ClusterSettings clusterSettings) { + + } + } + ); + + assertThat(withFactorySettings.getDefaultRetention(), equalTo(defaultFactoryValue)); + assertThat(withFactorySettings.getMaxRetention(), equalTo(maxFactoryValue)); + } + + public void testMonitorsDefaultRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue newDefaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 10)); + Settings newSettings = Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + newDefaultRetention.toHumanReadableString(0) + ) + .build(); + clusterSettings.applySettings(newSettings); + + assertThat(newDefaultRetention, equalTo(globalRetentionSettings.getDefaultRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.default' should be greater than") + ); + } + + public void testMonitorsMaxRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue newMaxRetention = TimeValue.timeValueDays(randomIntBetween(10, 30)); + Settings newSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), newMaxRetention.toHumanReadableString(0)) + .build(); + clusterSettings.applySettings(newSettings); + + 
assertThat(newMaxRetention, equalTo(globalRetentionSettings.getMaxRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.max' should be greater than") + ); + } + + public void testCombinationValidation() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings.create(clusterSettings, DataStreamFactoryRetention.emptyFactoryRetention()); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(90)) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(30)) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString( + "Setting [data_streams.lifecycle.retention.default=90d] cannot be greater than [data_streams.lifecycle.retention.max=30d]" + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java index acfe2b4f847c4..f6417da4fa2da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java @@ -128,16 +128,22 @@ public void testUpdatingLifecycleOnADataStream() { HeaderWarning.setThreadContext(threadContext); String dataStream = randomAlphaOfLength(5); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); ClusterState before = ClusterState.builder( DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStream, 2)), List.of()) ).build(); + Settings settingsWithDefaultRetention = builder().put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + defaultRetention + ).build(); + MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ClusterState after = metadataDataStreamsService.updateDataLifecycle(before, List.of(dataStream), DataStreamLifecycle.DEFAULT); @@ -245,7 +251,9 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { new IndexSettingProviders(Set.of()) ); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); + Settings settingsWithDefaultRetention = Settings.builder() + 
.put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), defaultRetention) + .build(); ClusterState state = ClusterState.EMPTY_STATE; MetadataIndexTemplateService metadataIndexTemplateService = new MetadataIndexTemplateService( clusterService, @@ -255,7 +263,10 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); @@ -283,23 +294,4 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { ) ); } - - private DataStreamFactoryRetention getDefaultFactoryRetention(TimeValue defaultRetention) { - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return null; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 7ce418301a352..e0f4936300c0e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -400,7 +401,10 @@ public void testUpdateLifecycle() { MetadataDataStreamsService service = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); { // Remove lifecycle diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index f5daac8ecd090..e66dd32b718b7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -2501,7 +2502,10 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new 
DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); final List throwables = new ArrayList<>(); @@ -2543,9 +2547,6 @@ public void onFailure(Exception e) { private MetadataIndexTemplateService getMetadataIndexTemplateService() { IndicesService indicesService = getInstanceFromNode(IndicesService.class); ClusterService clusterService = getInstanceFromNode(ClusterService.class); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, @@ -2568,7 +2569,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } From e3bf795659ab2409b8bcd0804669e6602a1a30db Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:11:25 +1000 Subject: [PATCH 086/389] Mute org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT testScaledFloat #112003 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dd4dd2c7f2ec7..95fb4a32b4227 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -184,6 +184,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.test.rest.XPackRestIT issue: https://github.com/elastic/elasticsearch/issues/111944 +- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT + method: testScaledFloat + issue: https://github.com/elastic/elasticsearch/issues/112003 # Examples: # From 3390a82ef65d3c58f9e17e7eb5ae584f2691889e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 08:54:58 +0100 Subject: [PATCH 087/389] Remove `SnapshotDeleteListener` (#111988) This special listener is awkward to handle since it does not fit into the usual `ActionListener` framework. Moreover there's no need for it, we can have a regular listener and then a separate `Runnable` for tracking the completion of the cleanup actions. 
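To make the new contract concrete, here is a minimal, self-contained sketch of the pattern this commit adopts. All names here are illustrative stand-ins, not the real Elasticsearch classes: the actual types are org.elasticsearch.action.ActionListener and org.elasticsearch.repositories.RepositoryData.

    // Hedged sketch: a generic listener for the repository-data update plus a
    // separate Runnable replaces the old three-method SnapshotDeleteListener.
    final class ListenerSplitSketch {
        // Stand-in for ActionListener<T>.
        interface Listener<T> {
            void onResponse(T value);
            void onFailure(Exception e);
        }

        // Stand-in for RepositoryData.
        record RepositoryData(long generation) {}

        static void deleteSnapshots(Listener<RepositoryData> repositoryDataUpdateListener, Runnable onCompletion) {
            final RepositoryData updated;
            try {
                updated = new RepositoryData(42L); // pretend the root blob was rewritten here
            } catch (Exception e) {
                repositoryDataUpdateListener.onFailure(e); // onCompletion never runs on failure
                return;
            }
            // The next repository operation may proceed as soon as this fires...
            repositoryDataUpdateListener.onResponse(updated);
            // ...while completion of the trailing cleanup is signalled separately.
            onCompletion.run();
        }

        public static void main(String[] args) {
            deleteSnapshots(new Listener<>() {
                public void onResponse(RepositoryData data) {
                    System.out.println("repository data written: gen " + data.generation());
                }

                public void onFailure(Exception e) {
                    e.printStackTrace();
                }
            }, () -> System.out.println("cleanup done"));
        }
    }

The behavioural point mirrored here is the one the patch spells out in the `Repository#deleteSnapshots` javadoc: the listener marks the moment the next operation may safely proceed, while the runnable marks the end of the follow-up cleanup.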
--- .../repositories/s3/S3Repository.java | 81 ++++---------- .../repositories/FilterRepository.java | 6 +- .../repositories/InvalidRepository.java | 6 +- .../repositories/Repository.java | 11 +- .../repositories/UnknownTypeRepository.java | 6 +- .../blobstore/BlobStoreRepository.java | 105 ++++++++---------- .../snapshots/SnapshotDeleteListener.java | 35 ------ .../snapshots/SnapshotsService.java | 18 ++- .../RepositoriesServiceTests.java | 6 +- .../index/shard/RestoreOnlyRepository.java | 6 +- .../xpack/ccr/repository/CcrRepository.java | 6 +- 11 files changed, 97 insertions(+), 189 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index a6edb0dec4122..d75a3e8ad433e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -37,7 +37,6 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; @@ -320,7 +319,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte finalizeSnapshotContext.clusterMetadata(), finalizeSnapshotContext.snapshotInfo(), finalizeSnapshotContext.repositoryMetaVersion(), - delayedListener(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), + wrapWithWeakConsistencyProtection(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), info -> metadataDone.addListener(new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -339,50 +338,19 @@ public void onFailure(Exception e) { super.finalizeSnapshot(wrappedFinalizeContext); } - @Override - protected SnapshotDeleteListener wrapWithWeakConsistencyProtection(SnapshotDeleteListener listener) { - return new SnapshotDeleteListener() { - @Override - public void onDone() { - listener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onRepositoryDataWritten(repositoryData); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onFailure(e); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - /** * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. * See {@link #COOLDOWN_PERIOD} for details. 
*/ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { + @Override + protected ActionListener wrapWithWeakConsistencyProtection(ActionListener listener) { + final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; }); return new ActionListener<>() { @Override - public void onResponse(T response) { + public void onResponse(RepositoryData response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), coolDown, snapshotExecutor) @@ -483,43 +451,34 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener snapshotDeleteListener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { getMultipartUploadCleanupListener( isReadOnly() ? 0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), new ActionListener<>() { @Override public void onResponse(ActionListener multipartUploadCleanupListener) { - S3Repository.super.deleteSnapshots( - snapshotIds, - repositoryDataGeneration, - minimumNodeVersion, - new SnapshotDeleteListener() { - @Override - public void onDone() { - snapshotDeleteListener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - multipartUploadCleanupListener.onResponse(null); - snapshotDeleteListener.onRepositoryDataWritten(repositoryData); - } - - @Override - public void onFailure(Exception e) { - multipartUploadCleanupListener.onFailure(e); - snapshotDeleteListener.onFailure(e); - } + S3Repository.super.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + repositoryDataUpdateListener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); } - ); + }, onCompletion); } @Override public void onFailure(Exception e) { logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything - snapshotDeleteListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); } } ); diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 37f1850c1fb2d..67d59924652db 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -85,9 +84,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener 
repositoryDataUpdateListener, + Runnable onCompletion ) { - in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, listener); + in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, repositoryDataUpdateListener, onCompletion); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java index 948ae747e11a9..2aba6fbbebce2 100644 --- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -92,9 +91,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createCreationException()); + repositoryDataUpdateListener.onFailure(createCreationException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 06a53053bca88..fd52c21cad3f8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -161,13 +160,19 @@ public void onFailure(Exception e) { * @param repositoryDataGeneration the generation of the {@link RepositoryData} in the repository at the start of the deletion * @param minimumNodeVersion the minimum {@link IndexVersion} across the nodes in the cluster, with which the repository * format must remain compatible - * @param listener completion listener, see {@link SnapshotDeleteListener}. + * @param repositoryDataUpdateListener listener completed when the {@link RepositoryData} is updated, or when the process fails + * without changing the repository contents - in either case, it is now safe for the next operation + * on this repository to proceed. + * @param onCompletion action executed on completion of the cleanup actions that follow a successful + * {@link RepositoryData} update; not called if {@code repositoryDataUpdateListener} completes + * exceptionally. 
*/ void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ); /** diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java index 7821c865e166c..853de48a483a1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -90,9 +89,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createUnknownTypeException()); + repositoryDataUpdateListener.onFailure(createUnknownTypeException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index ddef1e1b808fe..e8af752bec179 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; import org.elasticsearch.snapshots.PausedSnapshotException; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -847,8 +846,8 @@ private RepositoryData safeRepositoryData(long repositoryDataGeneration, Map wrapWithWeakConsistencyProtection(ActionListener listener) { + return listener; } @Override @@ -856,19 +855,15 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - createSnapshotsDeletion(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { - @Override - public void onResponse(SnapshotsDeletion snapshotsDeletion) { - snapshotsDeletion.runDelete(listener); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + createSnapshotsDeletion( + snapshotIds, + repositoryDataGeneration, + minimumNodeVersion, + repositoryDataUpdateListener.delegateFailureAndWrap((l, snapshotsDeletion) -> snapshotsDeletion.runDelete(l, onCompletion)) + ); } /** @@ -933,7 +928,7 @@ private void createSnapshotsDeletion( * *
*
* Until the {@link RepositoryData} is updated there should be no other activities in the repository, and in particular the root - * blob must not change until it is updated by this deletion and {@link SnapshotDeleteListener#onRepositoryDataWritten} is called. + * blob must not change until it is updated by this deletion and the {@code repositoryDataUpdateListener} is completed. *
*
*/ class SnapshotsDeletion { @@ -1027,40 +1022,29 @@ class SnapshotsDeletion { // --------------------------------------------------------------------------------------------------------------------------------- // The overall flow of execution - void runDelete(SnapshotDeleteListener listener) { - final var releasingListener = new SnapshotDeleteListener() { - @Override - public void onDone() { - try { - shardBlobsToDelete.close(); - } finally { - listener.onDone(); - } - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - listener.onRepositoryDataWritten(repositoryData); + void runDelete(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { + final var releasingListener = repositoryDataUpdateListener.delegateResponse((l, e) -> { + try { + shardBlobsToDelete.close(); + } finally { + l.onFailure(e); } - - @Override - public void onFailure(Exception e) { - try { - shardBlobsToDelete.close(); - } finally { - listener.onFailure(e); - } - + }); + final Runnable releasingOnCompletion = () -> { + try { + shardBlobsToDelete.close(); + } finally { + onCompletion.run(); } }; if (useShardGenerations) { - runWithUniqueShardMetadataNaming(releasingListener); + runWithUniqueShardMetadataNaming(releasingListener, releasingOnCompletion); } else { - runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener)); + runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener), releasingOnCompletion); } } - private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithUniqueShardMetadataNaming(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { SubscribableListener // First write the new shard state metadata (without the removed snapshots) and compute deletion targets @@ -1082,30 +1066,29 @@ private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { ); }) - .addListener( - ActionListener.wrap( - // Once we have updated the repository, run the clean-ups - newRepositoryData -> { - listener.onRepositoryDataWritten(newRepositoryData); - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); - cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); - } - }, - listener::onFailure - ) - ); + .andThen((l, newRepositoryData) -> { + l.onResponse(newRepositoryData); + // Once we have updated the repository, run the unreferenced blobs cleanup in parallel to shard-level snapshot deletion + try (var refs = new RefCountingRunnable(onCompletion)) { + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); + } + }) + + .addListener(repositoryDataUpdateListener); } - private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithLegacyNumericShardMetadataNaming( + ActionListener repositoryDataUpdateListener, + Runnable onCompletion + ) { // Write the new repository data first (with the removed snapshot), using no shard generations updateRepositoryData( originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), - ActionListener.wrap(newRepositoryData -> { + repositoryDataUpdateListener.delegateFailure((delegate, newRepositoryData) -> { try (var refs = new RefCountingRunnable(() -> { - 
listener.onRepositoryDataWritten(newRepositoryData); - listener.onDone(); + delegate.onResponse(newRepositoryData); + onCompletion.run(); })) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); @@ -1120,7 +1103,7 @@ private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener list ) ); } - }, listener::onFailure) + }) ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java deleted file mode 100644 index 324ad736d7248..0000000000000 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.snapshots; - -import org.elasticsearch.repositories.RepositoryData; - -public interface SnapshotDeleteListener { - - /** - * Invoked once the snapshots have been fully deleted from the repository, including all async cleanup operations, indicating that - * listeners waiting for the end of the deletion can now be notified. - */ - void onDone(); - - /** - * Invoked once the updated {@link RepositoryData} has been written to the repository and it is safe for the next repository operation - * to proceed. - * - * @param repositoryData updated repository data - */ - void onRepositoryDataWritten(RepositoryData repositoryData); - - /** - * Invoked if writing updated {@link RepositoryData} to the repository failed. Once {@link #onRepositoryDataWritten(RepositoryData)} has - * been invoked this method will never be invoked. 
- * - * @param e exception during metadata steps of snapshot delete - */ - void onFailure(Exception e); -} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 6d7404d7472e5..ed88b7272245f 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -2491,19 +2492,11 @@ private void deleteSnapshotsFromRepository( ); return; } + final SubscribableListener doneFuture = new SubscribableListener<>(); repositoriesService.repository(deleteEntry.repository()) - .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new SnapshotDeleteListener() { - - private final ListenableFuture doneFuture = new ListenableFuture<>(); - - @Override - public void onDone() { - logger.info("snapshots {} deleted", snapshotIds); - doneFuture.onResponse(null); - } - + .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new ActionListener<>() { @Override - public void onRepositoryDataWritten(RepositoryData updatedRepoData) { + public void onResponse(RepositoryData updatedRepoData) { removeSnapshotDeletionFromClusterState( deleteEntry, updatedRepoData, @@ -2549,6 +2542,9 @@ protected void handleListeners(List> deleteListeners) { } ); } + }, () -> { + logger.info("snapshots {} deleted", snapshotIds); + doneFuture.onResponse(null); }); } } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 83cb189415f7e..59e0b955d1cff 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.test.ClusterServiceUtils; @@ -454,9 +453,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 26e887338158d..92ce7e083df3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ 
b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -27,7 +27,6 @@ import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -110,9 +109,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index d5a6e3c7e65c8..97e3a409d590d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -82,7 +82,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.FileRestoreContext; import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -371,9 +370,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); } @Override From 6f3fab974998e0aedcd8eefbf20544890fcdd068 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:39:35 +0300 Subject: [PATCH 088/389] Check for valid parentDoc before retrieving its previous (#112005) #111943 unveiled a bug in `collectChildren` where we attempt to collect the previous doc of the parent, even when the parent doc has no previous doc.
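For context, a hedged sketch of the guard (all names here are hypothetical stand-ins, not the real Lucene or mapper types): Lucene's `BitSet.prevSetBit` asserts a non-negative index, so the first parent document in a segment, which has no previous parent, must short-circuit to -1 instead of calling `prevSetBit(-1)`.

    // Hypothetical stand-in mirroring Lucene's contract: index must be >= 0.
    final class PrevParentSketch {
        interface ParentBitSet {
            int prevSetBit(int index);
        }

        static int prevParentDoc(ParentBitSet parentDocs, int parentDoc) {
            // Before the fix this called parentDocs.prevSetBit(parentDoc - 1)
            // unconditionally, passing -1 whenever the parent was doc 0.
            return parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1;
        }

        public static void main(String[] args) {
            ParentBitSet everyDocIsAParent = index -> {
                assert index >= 0 : "prevSetBit called with negative index";
                return index;
            };
            System.out.println(prevParentDoc(everyDocIsAParent, 0)); // -1: no previous parent
            System.out.println(prevParentDoc(everyDocIsAParent, 7)); // 6 with this stand-in
        }
    }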
Fixes #111990, #111991, #111992, #111993 --- docs/changelog/112005.yaml | 6 ++++++ .../org/elasticsearch/index/mapper/NestedObjectMapper.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112005.yaml diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml new file mode 100644 index 0000000000000..2d84381e632b3 --- /dev/null +++ b/docs/changelog/112005.yaml @@ -0,0 +1,6 @@ +pr: 112005 +summary: Check for valid `parentDoc` before retrieving its previous +area: Mapping +type: bug +issues: + - 111990 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 23bdd0f559206..f3c438adcea09 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -441,7 +441,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf private List collectChildren(int parentDoc, BitSet parentDocs, DocIdSetIterator childIt) throws IOException { assert parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; - final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + final int prevParentDoc = parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1; int childDocId = childIt.docID(); if (childDocId <= prevParentDoc) { childDocId = childIt.advance(prevParentDoc + 1); From e19cd0d20756f8e0a65a2d35f73dcbb014f36300 Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Tue, 20 Aug 2024 12:06:22 +0200 Subject: [PATCH 089/389] [Profiling] add container.id field to event index template (#111969) * [Profiling] add container.id field to event index template This PR adds a new container.id field to the index template of the profiling-events data-stream. The field name was chosen in [accordance with ECS](https://www.elastic.co/guide/en/ecs/current/ecs-container.html#field-container-id) and also matches what the field is called in the APM indices. 
Signed-off-by: Florian Lehner * Update docs/changelog/111969.yaml --------- Signed-off-by: Florian Lehner --- docs/changelog/111969.yaml | 5 +++++ .../profiling/component-template/profiling-events.json | 3 +++ .../persistence/ProfilingIndexTemplateRegistry.java | 5 +++-- 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/111969.yaml diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml new file mode 100644 index 0000000000000..2d276850c4988 --- /dev/null +++ b/docs/changelog/111969.yaml @@ -0,0 +1,5 @@ +pr: 111969 +summary: "[Profiling] add `container.id` field to event index template" +area: Application +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index 9b90f97682306..8f50ebd334f16 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -77,6 +77,9 @@ }, "service.name": { "type": "keyword" + }, + "container.id": { + "type": "keyword" } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java index 3b361748abf67..7d8a474453c4c 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java @@ -53,10 +53,11 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 10: changed mapping profiling-events @timestamp to 'date_nanos' from 'date' // version 11: Added 'profiling.agent.protocol' keyword mapping to profiling-hosts // version 12: Added 'profiling.agent.env_https_proxy' keyword mapping to profiling-hosts - public static final int INDEX_TEMPLATE_VERSION = 12; + // version 13: Added 'container.id' keyword mapping to profiling-events + public static final int INDEX_TEMPLATE_VERSION = 13; // history for individual indices / index templates. 
Only bump these for breaking changes that require to create a new index - public static final int PROFILING_EVENTS_VERSION = 4; + public static final int PROFILING_EVENTS_VERSION = 5; public static final int PROFILING_EXECUTABLES_VERSION = 1; public static final int PROFILING_METRICS_VERSION = 2; public static final int PROFILING_HOSTS_VERSION = 2; From 9ab86652355bebd4909409a66166596531b66005 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 20 Aug 2024 13:23:36 +0300 Subject: [PATCH 090/389] No error when `store_array_source` is used without synthetic source (#111966) * No error for store_array_source in standard mode * Update docs/changelog/111966.yaml * nested object test * restore noop tests * spotless fix --- docs/changelog/111966.yaml | 5 +++++ .../java/org/elasticsearch/index/mapper/ObjectMapper.java | 3 --- .../index/mapper/NestedObjectMapperTests.java | 8 ++++---- .../org/elasticsearch/index/mapper/ObjectMapperTests.java | 8 ++++---- 4 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/111966.yaml diff --git a/docs/changelog/111966.yaml b/docs/changelog/111966.yaml new file mode 100644 index 0000000000000..facf0a61c4d8a --- /dev/null +++ b/docs/changelog/111966.yaml @@ -0,0 +1,5 @@ +pr: 111966 +summary: No error when `store_array_source` is used without synthetic source +area: Mapping +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 843fc3b15a6df..2c78db6bc8b0d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -473,9 +473,6 @@ public final boolean storeArraySource() { @Override public void validate(MappingLookup mappers) { - if (storeArraySource() && mappers.isSourceSynthetic() == false) { - throw new MapperParsingException("Parameter [" + STORE_ARRAY_SOURCE_PARAM + "] can only be set in synthetic source mode."); - } for (Mapper mapper : this.mappers.values()) { mapper.validate(mappers); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 4fba22101df03..13bd5955d67a5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1575,11 +1575,11 @@ public void testStoreArraySourceinSyntheticSourceMode() throws IOException { assertNotNull(mapper.mapping().getRoot().getMapper("o")); } - public void testStoreArraySourceThrowsInNonSyntheticSourceMode() { - var exception = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping(b -> { + public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("o").field("type", "nested").field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true).endObject(); - }))); - assertEquals("Parameter [store_array_source] can only be set in synthetic source mode.", exception.getMessage()); + })); + assertNotNull(mapper.mapping().getRoot().getMapper("o")); } public void testSyntheticNestedWithObject() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 6687a28883716..3c81f833985dd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -546,11 +546,11 @@ public void testStoreArraySourceinSyntheticSourceMode() throws IOException { assertNotNull(mapper.mapping().getRoot().getMapper("o")); } - public void testStoreArraySourceThrowsInNonSyntheticSourceMode() { - var exception = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping(b -> { + public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("o").field("type", "object").field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true).endObject(); - }))); - assertEquals("Parameter [store_array_source] can only be set in synthetic source mode.", exception.getMessage()); + })); + assertNotNull(mapper.mapping().getRoot().getMapper("o")); } public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { From 3f49509f04b307cf84abd63f01a7b83819e17184 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Tue, 20 Aug 2024 12:56:31 +0200 Subject: [PATCH 091/389] Make error.grouping_name script compatible with synthetic _source (#112009) --- .../component-templates/logs-apm.error@mappings.yaml | 11 ++++++++--- .../rest-api-spec/test/20_error_grouping.yml | 7 ++++++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml index 1e2a6a679dc30..c1d004b4e7bf4 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml @@ -28,9 +28,14 @@ template: return; } def exception = params['_source'].error?.exception; - def exceptionMessage = exception != null && exception.length > 0 ? exception[0]?.message : null; - if (exceptionMessage != null && exceptionMessage != "") { - emit(exception[0].message); + if (exception != null && exception.isEmpty() == false) { + def exceptionMessage = exception instanceof Map ? 
exception?.message : exception[0]?.message; + if (exceptionMessage instanceof List) { + exceptionMessage = exceptionMessage[0] + } + if (exceptionMessage != null && exceptionMessage != "") { + emit(exceptionMessage); + } } # http.* diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml index f7cd386227fe8..37a1651da562b 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml @@ -39,6 +39,10 @@ setup: - create: {} - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "exception_used"}]}}' + # Non-empty error.exception.message used from array + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "first_exception_used"}, {"message": "2_ignored"}]}}' + - is_false: errors - do: search: index: logs-apm.error-testing body: fields: ["error.grouping_name"] - - length: { hits.hits: 7 } + - length: { hits.hits: 8 } - match: { hits.hits.0.fields: null } - match: { hits.hits.1.fields: null } - match: { hits.hits.2.fields: null } @@ -54,3 +58,4 @@ - match: { hits.hits.4.fields: null } - match: { hits.hits.5.fields: {"error.grouping_name": ["log_used"]} } - match: { hits.hits.6.fields: {"error.grouping_name": ["exception_used"]} } + - match: { hits.hits.7.fields: {"error.grouping_name": ["first_exception_used"]} } From dd49c33479d5f27cd708a2a71b0407af970d6e94 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Tue, 20 Aug 2024 13:40:59 +0200 Subject: [PATCH 092/389] ESQL: BUCKET: allow numerical spans as whole numbers (#111874) This relaxes the check on numerical spans to allow them to be specified as whole numbers. So far it was required that they be provided as a double. This also expands the tests for date ranges to include string types. Resolves #109340, resolves #104646, resolves #105375.
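For illustration, a whole-number span can now be passed directly as the second argument, where previously it had to be written as a floating point literal. A minimal sketch, assuming the `employees` test index with its numeric `salary` field (as used in the docs examples touched below):

    FROM employees
    | STATS c = COUNT(*) BY b = BUCKET(salary, 5000.)
    | SORT b

    FROM employees
    | STATS c = COUNT(*) BY b = BUCKET(salary, 5000)
    | SORT b

Both queries group salaries into buckets of 5000; before this change the second form was rejected, since the bucket size had to be of a floating point type.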
--- docs/changelog/111874.yaml | 8 + .../esql/functions/examples/bucket.asciidoc | 4 - .../functions/kibana/definition/bucket.json | 460 +++++++++++++++--- .../esql/functions/parameters/bucket.asciidoc | 4 +- .../esql/functions/types/bucket.asciidoc | 14 + .../src/main/resources/bucket.csv-spec | 26 + .../src/main/resources/meta.csv-spec | 8 +- .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../expression/function/grouping/Bucket.java | 50 +- .../xpack/esql/analysis/VerifierTests.java | 64 ++- .../function/grouping/BucketTests.java | 53 +- .../optimizer/LogicalPlanOptimizerTests.java | 43 ++ 12 files changed, 632 insertions(+), 109 deletions(-) create mode 100644 docs/changelog/111874.yaml diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml new file mode 100644 index 0000000000000..26ec90aa6cd4c --- /dev/null +++ b/docs/changelog/111874.yaml @@ -0,0 +1,8 @@ +pr: 111874 +summary: "ESQL: BUCKET: allow numerical spans as whole numbers" +area: ES|QL +type: enhancement +issues: + - 104646 + - 109340 + - 105375 diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index e1bba0529d7db..4afea30660339 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -86,10 +86,6 @@ include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan] |=== include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan-result] |=== - -NOTE: When providing the bucket size as the second parameter, it must be -of a floating point type. - Create hourly buckets for the last 24 hours, and calculate the number of events per hour: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 7141ca4c27443..14bd74c1c20f3 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -40,13 +40,253 @@ "name" : "from", "type" : "datetime", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "datetime", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "datetime", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." 
+ }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "datetime", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "datetime", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "datetime", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." 
+ }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -88,6 +328,24 @@ "variadic" : false, "returnType" : "double" }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + } + ], + "variadic" : false, + "returnType" : "double" + }, { "params" : [ { @@ -106,13 +364,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -136,13 +394,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -166,13 +424,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -196,13 +454,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." 
} ], "variadic" : false, @@ -226,13 +484,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -256,13 +514,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -286,13 +544,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -316,13 +574,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -346,13 +604,31 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets." } ], "variadic" : false, @@ -376,6 +652,24 @@ "variadic" : false, "returnType" : "double" }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + } + ], + "variadic" : false, + "returnType" : "double" + }, { "params" : [ { @@ -394,13 +688,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. 
Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -424,13 +718,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -454,13 +748,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -484,13 +778,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -514,13 +808,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -544,13 +838,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -574,13 +868,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." 
} ], "variadic" : false, @@ -604,13 +898,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -634,13 +928,31 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets." } ], "variadic" : false, @@ -664,6 +976,24 @@ "variadic" : false, "returnType" : "double" }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets." + } + ], + "variadic" : false, + "returnType" : "double" + }, { "params" : [ { @@ -682,13 +1012,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -712,13 +1042,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -742,13 +1072,13 @@ "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -772,13 +1102,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. 
Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -802,13 +1132,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -832,13 +1162,13 @@ "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -862,13 +1192,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -892,13 +1222,13 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -922,13 +1252,31 @@ "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets." 
} ], "variadic" : false, diff --git a/docs/reference/esql/functions/parameters/bucket.asciidoc b/docs/reference/esql/functions/parameters/bucket.asciidoc index 39aac14aaa36d..342ea560aaa0b 100644 --- a/docs/reference/esql/functions/parameters/bucket.asciidoc +++ b/docs/reference/esql/functions/parameters/bucket.asciidoc @@ -9,7 +9,7 @@ Numeric or date expression from which to derive buckets. Target number of buckets. `from`:: -Start of the range. Can be a number or a date expressed as a string. +Start of the range. Can be a number, a date or a date expressed as a string. `to`:: -End of the range. Can be a number or a date expressed as a string. +End of the range. Can be a number, a date or a date expressed as a string. diff --git a/docs/reference/esql/functions/types/bucket.asciidoc b/docs/reference/esql/functions/types/bucket.asciidoc index d1ce8e499eb07..1cbfad14ca379 100644 --- a/docs/reference/esql/functions/types/bucket.asciidoc +++ b/docs/reference/esql/functions/types/bucket.asciidoc @@ -7,6 +7,14 @@ field | buckets | from | to | result datetime | date_period | | | datetime datetime | integer | datetime | datetime | datetime +datetime | integer | datetime | keyword | datetime +datetime | integer | datetime | text | datetime +datetime | integer | keyword | datetime | datetime +datetime | integer | keyword | keyword | datetime +datetime | integer | keyword | text | datetime +datetime | integer | text | datetime | datetime +datetime | integer | text | keyword | datetime +datetime | integer | text | text | datetime datetime | time_duration | | | datetime double | double | | | double double | integer | double | double | double @@ -18,6 +26,8 @@ double | integer | integer | long | double double | integer | long | double | double double | integer | long | integer | double double | integer | long | long | double +double | integer | | | double +double | long | | | double integer | double | | | double integer | integer | double | double | double integer | integer | double | integer | double @@ -28,6 +38,8 @@ integer | integer | integer | long | double integer | integer | long | double | double integer | integer | long | integer | double integer | integer | long | long | double +integer | integer | | | double +integer | long | | | double long | double | | | double long | integer | double | double | double long | integer | double | integer | double @@ -38,4 +50,6 @@ long | integer | integer | long | double long | integer | long | double | double long | integer | long | integer | double long | integer | long | long | double +long | integer | | | double +long | long | | | double |=== diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index 7e2afb9267e5b..b8569ead94509 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -314,6 +314,21 @@ FROM sample_data 3 |2025-10-01T00:00:00.000Z ; +bucketByYearLowBucketCount#[skip:-8.13.99, reason:BUCKET extended in 8.14] +FROM employees +| WHERE hire_date >= "1985-02-18T00:00:00.000Z" AND hire_date <= "1988-10-18T00:00:00.000Z" +| STATS c = COUNT(*) BY b = BUCKET(hire_date, 3, "1985-02-18T00:00:00.000Z", "1988-10-18T00:00:00.000Z") +| SORT b +; + +// Note: we don't bucket to anything longer than 1 year (like 2 years), so even if requesting 3 buckets, we still get 4 + c:long | b:date +11 |1985-01-01T00:00:00.000Z +11 |1986-01-01T00:00:00.000Z +15 
|1987-01-01T00:00:00.000Z +9 |1988-01-01T00:00:00.000Z +; + // // Numeric bucketing // @@ -393,6 +408,17 @@ ROW long = TO_LONG(100), double = 99., int = 100 99.0 |0.0 |99.0 ; +// identical results as above +bucketNumericMixedTypesIntegerSpans +required_capability: bucket_whole_number_as_span +ROW long = TO_LONG(100), double = 99., int = 100 +| STATS BY b1 = BUCKET(long, double::int), b2 = BUCKET(double, long), b3 = BUCKET(int, 49.5) +; + + b1:double| b2:double| b3:double +99.0 |0.0 |99.0 +; + bucketWithFloats#[skip:-8.13.99, reason:BUCKET renamed in 8.14] FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 35c852d6ba2fe..951545a546826 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -9,8 +9,8 @@ synopsis:keyword "double atan(number:double|integer|long|unsigned_long)" "double atan2(y_coordinate:double|integer|long|unsigned_long, x_coordinate:double|integer|long|unsigned_long)" "double avg(number:double|integer|long)" -"double|date bin(field:integer|long|double|date, buckets:integer|double|date_period|time_duration, ?from:integer|long|double|date, ?to:integer|long|double|date)" -"double|date bucket(field:integer|long|double|date, buckets:integer|double|date_period|time_duration, ?from:integer|long|double|date, ?to:integer|long|double|date)" +"double|date bin(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" +"double|date bucket(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version case(condition:boolean, trueValue...:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" "double cbrt(number:double|integer|long|unsigned_long)" "double|integer|long|unsigned_long ceil(number:double|integer|long|unsigned_long)" @@ -132,8 +132,8 @@ asin |number |"double|integer|long|unsigne atan |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. atan2 |[y_coordinate, x_coordinate] |["double|integer|long|unsigned_long", "double|integer|long|unsigned_long"] |[y coordinate. If `null`\, the function returns `null`., x coordinate. If `null`\, the function returns `null`.] avg |number |"double|integer|long" |[""] -bin |[field, buckets, from, to] |["integer|long|double|date", "integer|double|date_period|time_duration", "integer|long|double|date", "integer|long|double|date"] |[Numeric or date expression from which to derive buckets., Target number of buckets., Start of the range. Can be a number or a date expressed as a string., End of the range. Can be a number or a date expressed as a string.] -bucket |[field, buckets, from, to] |["integer|long|double|date", "integer|double|date_period|time_duration", "integer|long|double|date", "integer|long|double|date"] |[Numeric or date expression from which to derive buckets., Target number of buckets., Start of the range. Can be a number or a date expressed as a string., End of the range. Can be a number or a date expressed as a string.] 
+bin |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] +bucket |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] case |[condition, trueValue] |[boolean, "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version"] |[A condition., The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches.] cbrt |number |"double|integer|long|unsigned_long" |"Numeric expression. If `null`, the function returns `null`." ceil |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 996c5ac2ea319..0477167cd7315 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -234,7 +234,12 @@ public enum Cap { /** * Changed error messages for fields with conflicting types in different indices. */ - SHORT_ERROR_MESSAGES_FOR_UNSUPPORTED_FIELDS; + SHORT_ERROR_MESSAGES_FOR_UNSUPPORTED_FIELDS, + + /** + * Support for the whole number spans in BUCKET function. + */ + BUCKET_WHOLE_NUMBER_AS_SPAN; private final boolean snapshotOnly; private final FeatureFlag featureFlag; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 712eee8672bf3..5fabfe0e03d89 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -144,9 +145,7 @@ another in which the bucket size is provided directly (two parameters). ), @Example(description = """ The range can be omitted if the desired bucket size is known in advance. 
Simply - provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, it must be - of a floating point type."""), + provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan"), @Example( description = "Create hourly buckets for the last 24 hours, and calculate the number of events per hour:", file = "bucket", @@ -176,23 +175,23 @@ public Bucket( ) Expression field, @Param( name = "buckets", - type = { "integer", "double", "date_period", "time_duration" }, - description = "Target number of buckets." + type = { "integer", "long", "double", "date_period", "time_duration" }, + description = "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." ) Expression buckets, @Param( name = "from", - type = { "integer", "long", "double", "date" }, + type = { "integer", "long", "double", "date", "keyword", "text" }, optional = true, - description = "Start of the range. Can be a number or a date expressed as a string." + description = "Start of the range. Can be a number, a date or a date expressed as a string." ) Expression from, @Param( name = "to", - type = { "integer", "long", "double", "date" }, + type = { "integer", "long", "double", "date", "keyword", "text" }, optional = true, - description = "End of the range. Can be a number or a date expressed as a string." + description = "End of the range. Can be a number, a date or a date expressed as a string." ) Expression to ) { - super(source, from != null && to != null ? List.of(field, buckets, from, to) : List.of(field, buckets)); + super(source, fields(field, buckets, from, to)); this.field = field; this.buckets = buckets; this.from = from; @@ -209,6 +208,19 @@ private Bucket(StreamInput in) throws IOException { ); } + private static List fields(Expression field, Expression buckets, Expression from, Expression to) { + List list = new ArrayList<>(4); + list.add(field); + list.add(buckets); + if (from != null) { + list.add(from); + if (to != null) { + list.add(to); + } + } + return list; + } + @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); @@ -251,7 +263,6 @@ public ExpressionEvaluator.Factory toEvaluator(Function isNumeric(from, sourceText(), THIRD)).and(() -> isNumeric(to, sourceText(), FOURTH)) - : isNumeric(buckets, sourceText(), SECOND).and(checkArgsCount(2)); + return isNumeric(buckets, sourceText(), SECOND).and(() -> { + if (bucketsType.isRationalNumber()) { + return checkArgsCount(2); + } else { // second arg is a whole number: either a span, but as a whole, or count, and we must expect a range + var resolution = checkArgsCount(2); + if (resolution.resolved() == false) { + resolution = checkArgsCount(4).and(() -> isNumeric(from, sourceText(), THIRD)) + .and(() -> isNumeric(to, sourceText(), FOURTH)); + } + return resolution; + } + }); } return isType(field, e -> false, sourceText(), FIRST, "datetime", "numeric"); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index ab216e10b674c..bdea0807a78c4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -394,6 +394,66 @@ public void testGroupingInsideGrouping() { ); } + public void 
testInvalidBucketCalls() { + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, 5, \"2000-01-01\")"), + containsString( + "function expects exactly four arguments when the first one is of type [INTEGER] and the second of type [INTEGER]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, 1 week, \"2000-01-01\")"), + containsString( + "second argument of [bucket(emp_no, 1 week, \"2000-01-01\")] must be [numeric], found value [1 week] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5.5, \"2000-01-01\")"), + containsString( + "second argument of [bucket(hire_date, 5.5, \"2000-01-01\")] must be [integral, date_period or time_duration], " + + "found value [5.5] type [double]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, 1 day, 1 month)"), + containsString( + "third argument of [bucket(hire_date, 5, 1 day, 1 month)] must be [datetime or string], " + + "found value [1 day] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, \"2000-01-01\", 1 month)"), + containsString( + "fourth argument of [bucket(hire_date, 5, \"2000-01-01\", 1 month)] must be [datetime or string], " + + "found value [1 month] type [date_period]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, 5, \"2000-01-01\")"), + containsString( + "function expects exactly four arguments when the first one is of type [DATETIME] and the second of type [INTEGER]" + ) + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(emp_no, \"5\")"), + containsString("second argument of [bucket(emp_no, \"5\")] must be [numeric], found value [\"5\"] type [keyword]") + ); + + assertThat( + error("from test | stats max(emp_no) by bucket(hire_date, \"5\")"), + containsString( + "second argument of [bucket(hire_date, \"5\")] must be [integral, date_period or time_duration], " + + "found value [\"5\"] type [keyword]" + ) + ); + } + public void testAggsWithInvalidGrouping() { assertEquals( "1:35: column [languages] cannot be used as an aggregate once declared in the STATS BY grouping key [l = languages % 3]", @@ -748,9 +808,9 @@ public void testAggsResolutionWithUnresolvedGroupings() { ); assertThat(error("FROM tests | STATS " + agg_func + "(foobar) by foobar"), matchesRegex("1:\\d+: Unknown column \\[foobar]")); assertThat( - error("FROM tests | STATS " + agg_func + "(foobar) by BUCKET(languages, 10)"), + error("FROM tests | STATS " + agg_func + "(foobar) by BUCKET(hire_date, 10)"), matchesRegex( - "1:\\d+: function expects exactly four arguments when the first one is of type \\[INTEGER]" + "1:\\d+: function expects exactly four arguments when the first one is of type \\[DATETIME]" + " and the second of type \\[INTEGER]\n" + "line 1:\\d+: Unknown column \\[foobar]" ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java index 4c7b812111450..a26504b8ced9a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java @@ -73,7 +73,7 @@ public static Iterable parameters() { } // TODO once we cast above the functions we can drop these - private static final DataType[] 
DATE_BOUNDS_TYPE = new DataType[] { DataType.DATETIME }; + private static final DataType[] DATE_BOUNDS_TYPE = new DataType[] { DataType.DATETIME, DataType.KEYWORD, DataType.TEXT }; private static void dateCases(List suppliers, String name, LongSupplier date) { for (DataType fromType : DATE_BOUNDS_TYPE) { @@ -89,7 +89,7 @@ private static void dateCases(List suppliers, String name, Lon args, "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding[DAY_OF_MONTH in Z][fixed to midnight]]", DataType.DATETIME, - dateResultsMatcher(args) + resultsMatcher(args) ); })); // same as above, but a low bucket count and datetime bounds that match it (at hour span) @@ -136,7 +136,7 @@ private static void dateCasesWithSpan( args, "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding" + spanStr + "]", DataType.DATETIME, - dateResultsMatcher(args) + resultsMatcher(args) ); })); } @@ -167,7 +167,7 @@ private static void numberCases(List suppliers, String name, D + ", " + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", DataType.DOUBLE, - dateResultsMatcher(args) + resultsMatcher(args) ); })); } @@ -187,26 +187,29 @@ private static TestCaseSupplier.TypedData numericBound(String name, DataType typ } private static void numberCasesWithSpan(List suppliers, String name, DataType numberType, Supplier number) { - suppliers.add(new TestCaseSupplier(name, List.of(numberType, DataType.DOUBLE), () -> { - List args = new ArrayList<>(); - args.add(new TestCaseSupplier.TypedData(number.get(), "field")); - args.add(new TestCaseSupplier.TypedData(50., DataType.DOUBLE, "span").forceLiteral()); - String attr = "Attribute[channel=0]"; - if (numberType == DataType.INTEGER) { - attr = "CastIntToDoubleEvaluator[v=" + attr + "]"; - } else if (numberType == DataType.LONG) { - attr = "CastLongToDoubleEvaluator[v=" + attr + "]"; - } - return new TestCaseSupplier.TestCase( - args, - "MulDoublesEvaluator[lhs=FloorDoubleEvaluator[val=DivDoublesEvaluator[lhs=" - + attr - + ", " - + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", - DataType.DOUBLE, - dateResultsMatcher(args) - ); - })); + for (Number span : List.of(50, 50L, 50d)) { + DataType spanType = DataType.fromJava(span); + suppliers.add(new TestCaseSupplier(name, List.of(numberType, spanType), () -> { + List args = new ArrayList<>(); + args.add(new TestCaseSupplier.TypedData(number.get(), "field")); + args.add(new TestCaseSupplier.TypedData(span, spanType, "span").forceLiteral()); + String attr = "Attribute[channel=0]"; + if (numberType == DataType.INTEGER) { + attr = "CastIntToDoubleEvaluator[v=" + attr + "]"; + } else if (numberType == DataType.LONG) { + attr = "CastLongToDoubleEvaluator[v=" + attr + "]"; + } + return new TestCaseSupplier.TestCase( + args, + "MulDoublesEvaluator[lhs=FloorDoubleEvaluator[val=DivDoublesEvaluator[lhs=" + + attr + + ", " + + "rhs=LiteralsEvaluator[lit=50.0]]], rhs=LiteralsEvaluator[lit=50.0]]", + DataType.DOUBLE, + resultsMatcher(args) + ); + })); + } } @@ -214,7 +217,7 @@ private static TestCaseSupplier.TypedData keywordDateLiteral(String name, DataTy return new TestCaseSupplier.TypedData(date, type, name).forceLiteral(); } - private static Matcher dateResultsMatcher(List typedData) { + private static Matcher resultsMatcher(List typedData) { if (typedData.get(0).type() == DataType.DATETIME) { long millis = ((Number) typedData.get(0).data()).longValue(); return equalTo(Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).build().prepareForUnknown().round(millis)); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index c6b12eb0dc23f..a294f33ece5c3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -3514,6 +3514,49 @@ public void testBucketWithAggExpression() { assertThat(agg.groupings().get(0), is(ref)); } + public void testBucketWithNonFoldingArgs() { + assertThat( + typesError("from types | stats max(integer) by bucket(date, integer, \"2000-01-01\", \"2000-01-02\")"), + containsString( + "second argument of [bucket(date, integer, \"2000-01-01\", \"2000-01-02\")] must be a constant, " + "received [integer]" + ) + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(date, 2, date, \"2000-01-02\")"), + containsString("third argument of [bucket(date, 2, date, \"2000-01-02\")] must be a constant, " + "received [date]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(date, 2, \"2000-01-02\", date)"), + containsString("fourth argument of [bucket(date, 2, \"2000-01-02\", date)] must be a constant, " + "received [date]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, long, 4, 5)"), + containsString("second argument of [bucket(integer, long, 4, 5)] must be a constant, " + "received [long]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, 3, long, 5)"), + containsString("third argument of [bucket(integer, 3, long, 5)] must be a constant, " + "received [long]") + ); + + assertThat( + typesError("from types | stats max(integer) by bucket(integer, 3, 4, long)"), + containsString("fourth argument of [bucket(integer, 3, 4, long)] must be a constant, " + "received [long]") + ); + } + + private String typesError(String query) { + VerificationException e = expectThrows(VerificationException.class, () -> planTypes(query)); + String message = e.getMessage(); + assertTrue(message.startsWith("Found ")); + String pattern = "\nline "; + int index = message.indexOf(pattern); + return message.substring(index + pattern.length()); + } + /** * Expects * Project[[x{r}#5]] From 47d331662cc8801336c36d83fa7b0ce7b6959a67 Mon Sep 17 00:00:00 2001 From: Johannes Fredén <109296772+jfreden@users.noreply.github.com> Date: Tue, 20 Aug 2024 14:42:44 +0200 Subject: [PATCH 093/389] Fix query roles test by setting license synchronously (#112002) Relates: https://github.com/elastic/elasticsearch/issues/110729 The `testQueryDLSFLSRolesShowAsDisabled` test failed intermittently, and my theory is that it's because applying the cluster's license to cluster state has `NORMAL` priority and therefore sometimes (very rarely) takes more than 10 seconds. There are some related discussions on this; see: https://github.com/elastic/elasticsearch/pull/67182, https://github.com/elastic/elasticsearch/issues/64578 Since we're not testing the actual license lifecycle in this test, but instead how an applied license impacts the query roles API, I changed the approach to use the synchronous `/_license/start_trial` API in a `@Before` method so we can be sure the license was applied before we start testing. An alternative to this fix could be to increase the timeout.
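For reference, a sketch of the synchronous call the test now issues up front, with an abridged response (`trial_was_started` is the field the new `@Before` method asserts; see the diff below for the actual code):

    POST /_license/start_trial?acknowledge=true

    {
      "acknowledged": true,
      "trial_was_started": true
    }

Since this request only returns once the trial license is in place, the previous `assertBusy`-based polling in `waitForLicense` is no longer needed.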
--- muted-tests.yml | 3 -- .../xpack/security/LicenseDLSFLSRoleIT.java | 44 ++++++++----------- 2 files changed, 19 insertions(+), 28 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 95fb4a32b4227..c2e0d48c31a20 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -65,9 +65,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.xpack.security.LicenseDLSFLSRoleIT - method: testQueryDLSFLSRolesShowAsDisabled - issue: https://github.com/elastic/elasticsearch/issues/110729 - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java index f81bab4866bdf..552e9f5cba578 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; @@ -21,6 +20,8 @@ import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import java.io.IOException; @@ -50,8 +51,6 @@ public final class LicenseDLSFLSRoleIT extends ESRestTestCase { public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .nodes(1) .distribution(DistributionType.DEFAULT) - // start as "trial" - .setting("xpack.license.self_generated.type", "trial") .setting("xpack.security.enabled", "true") .setting("xpack.security.http.ssl.enabled", "false") .setting("xpack.security.transport.ssl.enabled", "false") @@ -61,6 +60,23 @@ public final class LicenseDLSFLSRoleIT extends ESRestTestCase { .user(READ_SECURITY_USER, READ_SECURITY_PASSWORD.toString(), "read_security_user_role", false) .build(); + @Before + public void setupLicense() throws IOException { + // start with trial license + Request request = new Request("POST", "/_license/start_trial?acknowledge=true"); + Response response = adminClient().performRequest(request); + assertOK(response); + assertTrue((boolean) responseAsMap(response).get("trial_was_started")); + } + + @After + public void removeLicense() throws IOException { + // remove the license again after each test + Request request = new Request("DELETE", "/_license"); + Response response = adminClient().performRequest(request); + assertOK(response); + } + @Override protected String getTestRestCluster() { return cluster.getHttpAddresses(); } @@ -78,10 +94,7 @@ protected Settings restClientSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } - @SuppressWarnings("unchecked") public void
testQueryDLSFLSRolesShowAsDisabled() throws Exception { - // auto-generated "trial" - waitForLicense(adminClient(), "trial"); // neither DLS nor FLS role { RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { @@ -138,7 +151,6 @@ public void testQueryDLSFLSRolesShowAsDisabled() throws Exception { Map responseMap = responseAsMap(response); assertTrue(((Boolean) responseMap.get("basic_was_started"))); assertTrue(((Boolean) responseMap.get("acknowledged"))); - waitForLicense(adminClient(), "basic"); // now the same roles show up as disabled ("enabled" is "false") assertQuery(client(), "", 4, roles -> { roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); @@ -175,22 +187,4 @@ private static void assertRoleEnabled(Map roleMap, boolean enabl assertThat(roleMap.get("transient_metadata"), instanceOf(Map.class)); assertThat(((Map) roleMap.get("transient_metadata")).get("enabled"), equalTo(enabled)); } - - @SuppressWarnings("unchecked") - private static void waitForLicense(RestClient adminClient, String type) throws Exception { - final Request request = new Request("GET", "_license"); - assertBusy(() -> { - Response response; - try { - response = adminClient.performRequest(request); - } catch (ResponseException e) { - throw new AssertionError("license not yet installed", e); - } - assertOK(response); - Map responseMap = responseAsMap(response); - assertTrue(responseMap.containsKey("license")); - assertThat(((Map) responseMap.get("license")).get("status"), equalTo("active")); - assertThat(((Map) responseMap.get("license")).get("type"), equalTo(type)); - }); - } } From 5a10545d371c9665892a431fd7e036b500c6f3ba Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 20 Aug 2024 06:07:08 -0700 Subject: [PATCH 094/389] Upgrade xcontent to Jackson 2.17.0 (#111948) --- docs/changelog/111948.yaml | 5 +++ gradle/verification-metadata.xml | 35 +++++++++++++------ libs/x-content/impl/build.gradle | 2 +- .../provider/json/JsonXContentImpl.java | 2 ++ .../search/MultiSearchRequestTests.java | 12 ++++--- .../index/mapper/DocumentParserTests.java | 2 +- .../HuggingFaceElserResponseEntityTests.java | 2 +- 7 files changed, 42 insertions(+), 18 deletions(-) create mode 100644 docs/changelog/111948.yaml diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml new file mode 100644 index 0000000000000..a3a592abaf1ca --- /dev/null +++ b/docs/changelog/111948.yaml @@ -0,0 +1,5 @@ +pr: 111948 +summary: Upgrade xcontent to Jackson 2.17.0 +area: Infra/Core +type: upgrade +issues: [] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 00f1caec24cf7..1001ab2b709dd 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -306,6 +306,11 @@ + + + + + @@ -336,6 +341,11 @@ + + + + + @@ -346,6 +356,11 @@ + + + + + @@ -361,6 +376,11 @@ + + + + + @@ -953,11 +973,6 @@ - - - - - @@ -1746,16 +1761,16 @@ - - - - - + + + + + diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 41b65044735ca..829b75524baeb 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -12,7 +12,7 @@ base { archivesName = "x-content-impl" } -String jacksonVersion = "2.15.0" +String jacksonVersion = "2.17.0" dependencies { compileOnly project(':libs:elasticsearch-core') diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java 
index ae494796c88cb..4e04230a7486e 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java @@ -54,6 +54,8 @@ public static final XContent jsonXContent() { jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); jsonFactory.configure(JsonParser.Feature.USE_FAST_DOUBLE_PARSER, true); + // keeping existing behavior of including source, for now + jsonFactory.configure(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION, true); jsonXContent = new JsonXContentImpl(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index a45730a82dbc2..67c8599f47029 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -45,6 +45,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -582,11 +583,12 @@ public void testFailOnExtraCharacters() throws IOException { """, null); fail("should have caught second line; extra closing brackets"); } catch (XContentParseException e) { - assertEquals( - "[1:31] Unexpected close marker '}': expected ']' (for root starting at " - + "[Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"; line: 1, column: 0])\n " - + "at [Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"; line: 1, column: 31]", - e.getMessage() + assertThat( + e.getMessage(), + containsString( + "Unexpected close marker '}': expected ']' (for root starting at " + + "[Source: (byte[])\"{ \"query\": {\"match_all\": {}}}}}}different error message\"" + ) ); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 7fa08acd53882..1a0e2376797b8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2642,7 +2642,7 @@ same name need to be part of the same mappings (hence the same document). 
If th } public void testDeeplyNestedDocument() throws Exception { - int depth = 10000; + int depth = 20; DocumentMapper docMapper = createMapperService(Settings.builder().put(getIndexSettings()).build(), mapping(b -> {})) .documentMapper(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java index c3c416d8fe65e..e350a539ba928 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/huggingface/HuggingFaceElserResponseEntityTests.java @@ -310,7 +310,7 @@ public void testFails_ResponseIsInvalidJson_MissingSquareBracket() { ) ); - assertThat(thrownException.getMessage(), containsString("expected close marker for Array (start marker at [Source: (byte[])")); + assertThat(thrownException.getMessage(), containsString("expected close marker for Array (start marker at")); } public void testFails_ResponseIsInvalidJson_MissingField() { From 3de2587f939b38fa7532844406010822ef3ec260 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 20 Aug 2024 09:23:35 -0400 Subject: [PATCH 095/389] ESQL: Test on-the-wire size for some plan nodes (#111980) This adds some very highly specified tests for the on-the-wire size for plan nodes, especially those with mapping conflicts. We've been having some trouble with this being *very* large on the wire and we'd like to take more care in the future to keep these from growing. The plan is that we'll lower these limits as we go, "ratcheting" the serialization size down as we make improvements. The test will make sure we don't make things worse. --- .../test/ByteSizeEqualsMatcher.java | 43 +++++ .../xpack/esql/analysis/Analyzer.java | 5 +- .../esql/index/EsIndexSerializationTests.java | 102 +++++++++++ .../ExchangeSinkExecSerializationTests.java | 159 ++++++++++++++++++ 4 files changed, 308 insertions(+), 1 deletion(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java diff --git a/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java new file mode 100644 index 0000000000000..172d5f2076a0f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/ByteSizeEqualsMatcher.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; + +/** + * Equality matcher for {@link ByteSizeValue} that has a nice description of failures. 
+ */ +public class ByteSizeEqualsMatcher extends TypeSafeMatcher { + public static ByteSizeEqualsMatcher byteSizeEquals(ByteSizeValue expected) { + return new ByteSizeEqualsMatcher(expected); + } + + private final ByteSizeValue expected; + + private ByteSizeEqualsMatcher(ByteSizeValue expected) { + this.expected = expected; + } + + @Override + protected boolean matchesSafely(ByteSizeValue byteSizeValue) { + return expected.equals(byteSizeValue); + } + + @Override + public void describeTo(Description description) { + description.appendValue(expected.toString()).appendText(" (").appendValue(expected.getBytes()).appendText(" bytes)"); + } + + @Override + protected void describeMismatchSafely(ByteSizeValue item, Description mismatchDescription) { + mismatchDescription.appendValue(item.toString()).appendText(" (").appendValue(item.getBytes()).appendText(" bytes)"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4a116fd102cd0..3ffb4acbe6455 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -212,8 +212,11 @@ protected LogicalPlan rule(UnresolvedRelation plan, AnalyzerContext context) { * Specific flattening method, different from the default EsRelation that: * 1. takes care of data type widening (for certain types) * 2. drops the object and keyword hierarchy + *

+ * Public for testing. + *

*/ - private static List mappingAsAttributes(Source source, Map mapping) { + public static List mappingAsAttributes(Source source, Map mapping) { var list = new ArrayList(); mappingAsAttributes(list, source, null, mapping); list.sort(Comparator.comparing(Attribute::name)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index 1ac61a2adf68e..e1b56d61a211c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -7,17 +7,26 @@ package org.elasticsearch.xpack.esql.index; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.EsFieldTests; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; +import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.elasticsearch.test.ByteSizeEqualsMatcher.byteSizeEquals; public class EsIndexSerializationTests extends AbstractWireSerializingTestCase { public static EsIndex randomEsIndex() { @@ -73,4 +82,97 @@ protected EsIndex mutateInstance(EsIndex instance) throws IOException { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(EsField.getNamedWriteables()); } + + /** + * Build an {@link EsIndex} with many conflicting fields across many indices. + */ + public static EsIndex indexWithManyConflicts(boolean withParent) { + /* + * The number of fields with a mapping conflict. + */ + int conflictingCount = 250; + /* + * The number of indices that map conflicting fields are "keyword". + * One other index will map the field as "text" + */ + int keywordIndicesCount = 600; + /* + * The number of fields that don't have a mapping conflict. 
+ */ + int nonConflictingCount = 7000; + + Set keywordIndices = new TreeSet<>(); + for (int i = 0; i < keywordIndicesCount; i++) { + keywordIndices.add(String.format(Locale.ROOT, ".ds-logs-apache.access-external-2024.08.09-%08d", i)); + } + + Set textIndices = Set.of("logs-endpoint.events.imported"); + + Map fields = new TreeMap<>(); + for (int i = 0; i < conflictingCount; i++) { + String name = String.format(Locale.ROOT, "blah.blah.blah.blah.blah.blah.conflict.name%04d", i); + Map> conflicts = Map.of("text", textIndices, "keyword", keywordIndices); + fields.put(name, new InvalidMappedField(name, conflicts)); + } + for (int i = 0; i < nonConflictingCount; i++) { + String name = String.format(Locale.ROOT, "blah.blah.blah.blah.blah.blah.nonconflict.name%04d", i); + fields.put(name, new EsField(name, DataType.KEYWORD, Map.of(), true)); + } + + if (withParent) { + EsField parent = new EsField("parent", DataType.OBJECT, Map.copyOf(fields), false); + fields.put("parent", parent); + } + + TreeSet concrete = new TreeSet<>(); + concrete.addAll(keywordIndices); + concrete.addAll(textIndices); + + return new EsIndex("name", fields, concrete); + } + + /** + * Test the size of serializing an index with many conflicts at the root level. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflicts() throws IOException { + testManyTypeConflicts(false, ByteSizeValue.ofBytes(976591)); + } + + /** + * Test the size of serializing an index with many conflicts inside a "parent" object. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflictsWithParent() throws IOException { + testManyTypeConflicts(true, ByteSizeValue.ofBytes(1921374)); + /* + * History: + * 16.9mb - start + * 1.8mb - shorten error messages for UnsupportedAttributes #111973 + */ + } + + /** + * Test the size of serializing an index with many conflicts. Callers of + * this method intentionally use a very precise size for the serialized + * data so a programmer making changes has to think when this size changes. + *

+ * In general, shrinking the over the wire size is great and the precise + * size should just ratchet downwards. Small upwards movement is fine so + * long as you understand why the change is happening and you think it's + * worth it for the data node request for a big index to grow. + *

+ *

+ * Large upwards movement in the size is not fine! Folks frequently make + * requests across large clusters with many fields and these requests can + * really clog up the network interface. Super large results here can make + * ESQL impossible to use at all for big mappings with many conflicts. + *

+ */ + private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + indexWithManyConflicts(withParent).writeTo(out); + assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java new file mode 100644 index 0000000000000..237f8d6a9c580 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.EsIndexSerializationTests; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.session.Configuration; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.ByteSizeEqualsMatcher.byteSizeEquals; +import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; +import static org.hamcrest.Matchers.equalTo; + +public class ExchangeSinkExecSerializationTests extends ESTestCase { + // TODO port this to AbstractPhysicalPlanSerializationTests when implementing NamedWriteable + private Configuration config; + + public static Source randomSource() { + int lineNumber = between(0, EXAMPLE_QUERY.length - 1); + String line = EXAMPLE_QUERY[lineNumber]; + int offset = between(0, line.length() - 2); + int length = between(1, line.length() - offset - 1); + String text = line.substring(offset, offset + length); + return new 
Source(lineNumber + 1, offset, text); + } + + /** + * Test the size of serializing a plan with many conflicts. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflicts() throws IOException { + testManyTypeConflicts(false, ByteSizeValue.ofBytes(2444252)); + } + + /** + * Test the size of serializing a plan with many conflicts. + * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. + */ + public void testManyTypeConflictsWithParent() throws IOException { + testManyTypeConflicts(true, ByteSizeValue.ofBytes(5885765)); + /* + * History: + * 2 gb+ - start + * 43.3mb - Cache attribute subclasses #111447 + * 5.6mb - shorten error messages for UnsupportedAttributes #111973 + */ + } + + /** + * Test the size of serializing a plan with many conflicts. Callers of + * this method intentionally use a very precise size for the serialized + * data so a programmer making changes has to think when this size changes. + *

+ * In general, shrinking the over the wire size is great and the precise + * size should just ratchet downwards. Small upwards movement is fine so + * long as you understand why the change is happening and you think it's + * worth it for the data node request for a big index to grow. + *

+ *

+ * Large upwards movement in the size is not fine! Folks frequently make + * requests across large clusters with many fields and these requests can + * really clog up the network interface. Super large results here can make + * ESQL impossible to use at all for big mappings with many conflicts. + *

+ */ + private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { + EsIndex index = EsIndexSerializationTests.indexWithManyConflicts(withParent); + List attributes = Analyzer.mappingAsAttributes(randomSource(), index.mapping()); + EsRelation relation = new EsRelation(randomSource(), index, attributes, IndexMode.STANDARD); + Limit limit = new Limit(randomSource(), new Literal(randomSource(), 10, DataType.INTEGER), relation); + Project project = new Project(randomSource(), limit, limit.output()); + FragmentExec fragmentExec = new FragmentExec(project); + ExchangeSinkExec exchangeSinkExec = new ExchangeSinkExec(randomSource(), fragmentExec.output(), false, fragmentExec); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput pso = new PlanStreamOutput(out, new PlanNameRegistry(), configuration()) + ) { + pso.writePhysicalPlanNode(exchangeSinkExec); + assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); + try ( + PlanStreamInput psi = new PlanStreamInput( + out.bytes().streamInput(), + new PlanNameRegistry(), + getNamedWriteableRegistry(), + configuration() + ) + ) { + assertThat(psi.readPhysicalPlanNode(), equalTo(exchangeSinkExec)); + } + } + } + + private NamedWriteableRegistry getNamedWriteableRegistry() { + List entries = new ArrayList<>(); + entries.addAll(PhysicalPlan.getNamedWriteables()); + entries.addAll(LogicalPlan.getNamedWriteables()); + entries.addAll(AggregateFunction.getNamedWriteables()); + entries.addAll(Expression.getNamedWriteables()); + entries.addAll(Attribute.getNamedWriteables()); + entries.addAll(EsField.getNamedWriteables()); + entries.addAll(Block.getNamedWriteables()); + entries.addAll(NamedExpression.getNamedWriteables()); + entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); + return new NamedWriteableRegistry(entries); + } + + private Configuration configuration() { + return config; + } + + private static final String[] EXAMPLE_QUERY = new String[] { + "I am the very model of a modern Major-Gineral,", + "I've information vegetable, animal, and mineral,", + "I know the kings of England, and I quote the fights historical", + "From Marathon to Waterloo, in order categorical;", + "I'm very well acquainted, too, with matters mathematical,", + "I understand equations, both the simple and quadratical,", + "About binomial theorem I'm teeming with a lot o' news,", + "With many cheerful facts about the square of the hypotenuse." }; + + @Before + public void initConfig() { + config = randomConfiguration(String.join("\n", EXAMPLE_QUERY), Map.of()); + } +} From e3f378ebd289876fb15afa3e03aabd502fae7d28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Tue, 20 Aug 2024 15:24:55 +0200 Subject: [PATCH 096/389] ESQL: Strings support for MAX and MIN aggregations (#111544) Support Version, Keyword and Text in Max an Min aggregations. The current implementation of both max and min does: For non-grouping: - Store a BytesRef - When there's a max/min, copy it to the internal array. Grow it if needed For grouping: - Keep an array of BytesRef (null by default: there's no "initial/default value" here, as there's no "MAX" value for a string) - Each BytesRef stores their own array, which will be grown as needed to copy the new max/min Some notes: - It's not shrinking the arrays, as to avoid having to copy, and potentially grow it again - It's using raw arrays. But maybe it should use BigArrays to compute in the circuit breaker? 
Part of https://github.com/elastic/elasticsearch/issues/110346 --- docs/changelog/111544.yaml | 5 + .../esql/functions/kibana/definition/max.json | 36 +++ .../esql/functions/kibana/definition/min.json | 36 +++ .../esql/functions/types/max.asciidoc | 3 + .../esql/functions/types/min.asciidoc | 3 + .../compute/gen/AggregatorImplementer.java | 2 +- .../org/elasticsearch/compute/gen/Types.java | 1 + .../MaxBytesRefAggregatorFunction.java | 133 +++++++++++ ...MaxBytesRefAggregatorFunctionSupplier.java | 38 ++++ ...MaxBytesRefGroupingAggregatorFunction.java | 210 ++++++++++++++++++ .../MinBytesRefAggregatorFunction.java | 133 +++++++++++ ...MinBytesRefAggregatorFunctionSupplier.java | 38 ++++ ...MinBytesRefGroupingAggregatorFunction.java | 210 ++++++++++++++++++ .../aggregation/AbstractArrayState.java | 2 +- .../aggregation/BytesRefArrayState.java | 153 +++++++++++++ .../aggregation/MaxBytesRefAggregator.java | 149 +++++++++++++ .../aggregation/MinBytesRefAggregator.java | 149 +++++++++++++ .../operator/BreakingBytesRefBuilder.java | 10 +- .../MaxBytesRefAggregatorFunctionTests.java | 53 +++++ ...tesRefGroupingAggregatorFunctionTests.java | 62 ++++++ .../MinBytesRefAggregatorFunctionTests.java | 53 +++++ ...tesRefGroupingAggregatorFunctionTests.java | 62 ++++++ .../BreakingBytesRefBuilderTests.java | 26 ++- .../src/main/resources/meta.csv-spec | 12 +- .../src/main/resources/stats.csv-spec | 160 +++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 + .../expression/function/aggregate/Max.java | 24 +- .../expression/function/aggregate/Min.java | 24 +- .../xpack/esql/planner/AggregateMapper.java | 2 +- .../xpack/esql/analysis/AnalyzerTests.java | 20 +- .../xpack/esql/analysis/VerifierTests.java | 4 +- .../function/aggregate/MaxTests.java | 40 +++- .../function/aggregate/MinTests.java | 40 +++- 33 files changed, 1848 insertions(+), 50 deletions(-) create mode 100644 docs/changelog/111544.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java create mode 100644 
x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml new file mode 100644 index 0000000000000..d4c46f485e664 --- /dev/null +++ b/docs/changelog/111544.yaml @@ -0,0 +1,5 @@ +pr: 111544 +summary: "ESQL: Strings support for MAX and MIN aggregations" +area: ES|QL +type: feature +issues: [] diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 853cb9f9a97c3..725b42763816d 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 1c0c02eb9860f..68dfdd6cfd8c0 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 5b7293d4a4293..705745d76dbab 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -10,5 +10,8 @@ datetime | datetime double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 5b7293d4a4293..705745d76dbab 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -10,5 +10,8 @@ datetime | datetime double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 
b3d32a82cc7a9..914724905541d 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -102,7 +102,7 @@ public AggregatorImplementer(Elements elements, TypeElement declarationType, Int this.createParameters = init.getParameters() .stream() .map(Parameter::from) - .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .filter(f -> false == f.type().equals(BIG_ARRAYS) && false == f.type().equals(DRIVER_CONTEXT)) .toList(); this.implementation = ClassName.get( diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java index 3150741ddcb05..2b42adc67d71a 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java @@ -34,6 +34,7 @@ public class Types { static final TypeName BLOCK_ARRAY = ArrayTypeName.of(BLOCK); static final ClassName VECTOR = ClassName.get(DATA_PACKAGE, "Vector"); + static final ClassName CIRCUIT_BREAKER = ClassName.get("org.elasticsearch.common.breaker", "CircuitBreaker"); static final ClassName BIG_ARRAYS = ClassName.get("org.elasticsearch.common.util", "BigArrays"); static final ClassName BOOLEAN_BLOCK = ClassName.get(DATA_PACKAGE, "BooleanBlock"); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java new file mode 100644 index 0000000000000..62897c61ea80e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunction.java @@ -0,0 +1,133 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBytesRefAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final MaxBytesRefAggregator.SingleState state; + + private final List channels; + + public MaxBytesRefAggregatorFunction(DriverContext driverContext, List channels, + MaxBytesRefAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MaxBytesRefAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MaxBytesRefAggregatorFunction(driverContext, channels, MaxBytesRefAggregator.initSingle(driverContext)); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BytesRefBlock block = page.getBlock(channels.get(0)); + BytesRefVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BytesRefVector vector) { + BytesRef scratch = new BytesRef(); + for (int i = 0; i < vector.getPositionCount(); i++) { + MaxBytesRefAggregator.combine(state, vector.getBytesRef(i, scratch)); + } + } + + private void addRawBlock(BytesRefBlock block) { + BytesRef scratch = new BytesRef(); + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + MaxBytesRefAggregator.combine(state, block.getBytesRef(i, scratch)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BytesRefVector max = ((BytesRefBlock) maxUncast).asVector(); + assert max.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + MaxBytesRefAggregator.combineIntermediate(state, max.getBytesRef(0, scratch), seen.getBoolean(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = MaxBytesRefAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..7c8af2e0c7e6d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MaxBytesRefAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MaxBytesRefAggregatorFunction aggregator(DriverContext driverContext) { + return MaxBytesRefAggregatorFunction.create(driverContext, channels); + } + + @Override + public MaxBytesRefGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MaxBytesRefGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "max of bytes"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..1720a8863a613 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MaxBytesRefAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MaxBytesRefGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final MaxBytesRefAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public MaxBytesRefGroupingAggregatorFunction(List channels, + MaxBytesRefAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MaxBytesRefGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MaxBytesRefGroupingAggregatorFunction(channels, MaxBytesRefAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BytesRefBlock valuesBlock = page.getBlock(channels.get(0)); + BytesRefVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + 
for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BytesRefVector max = ((BytesRefBlock) maxUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert max.getPositionCount() == seen.getPositionCount(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MaxBytesRefAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + MaxBytesRefAggregator.GroupingState inState = ((MaxBytesRefGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + MaxBytesRefAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = MaxBytesRefAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java new file mode 100644 index 0000000000000..3346dd762f17f --- 
/dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunction.java @@ -0,0 +1,133 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBytesRefAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final MinBytesRefAggregator.SingleState state; + + private final List channels; + + public MinBytesRefAggregatorFunction(DriverContext driverContext, List channels, + MinBytesRefAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MinBytesRefAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MinBytesRefAggregatorFunction(driverContext, channels, MinBytesRefAggregator.initSingle(driverContext)); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BytesRefBlock block = page.getBlock(channels.get(0)); + BytesRefVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BytesRefVector vector) { + BytesRef scratch = new BytesRef(); + for (int i = 0; i < vector.getPositionCount(); i++) { + MinBytesRefAggregator.combine(state, vector.getBytesRef(i, scratch)); + } + } + + private void addRawBlock(BytesRefBlock block) { + BytesRef scratch = new BytesRef(); + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + MinBytesRefAggregator.combine(state, block.getBytesRef(i, scratch)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BytesRefVector min = ((BytesRefBlock) minUncast).asVector(); + assert min.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if 
(seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + MinBytesRefAggregator.combineIntermediate(state, min.getBytesRef(0, scratch), seen.getBoolean(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = MinBytesRefAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..cb6ab0d06d401 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MinBytesRefAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MinBytesRefAggregatorFunction aggregator(DriverContext driverContext) { + return MinBytesRefAggregatorFunction.create(driverContext, channels); + } + + @Override + public MinBytesRefGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MinBytesRefGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "min of bytes"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..eb309614fcf3c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MinBytesRefAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBytesRefGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BYTES_REF), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final MinBytesRefAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public MinBytesRefGroupingAggregatorFunction(List channels, + MinBytesRefAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MinBytesRefGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MinBytesRefGroupingAggregatorFunction(channels, MinBytesRefAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BytesRefBlock valuesBlock = page.getBlock(channels.get(0)); + BytesRefVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + 
MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BytesRefVector min = ((BytesRefBlock) minUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert min.getPositionCount() == seen.getPositionCount(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MinBytesRefAggregator.combineIntermediate(state, groupId, min.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + MinBytesRefAggregator.GroupingState inState = ((MinBytesRefGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + MinBytesRefAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector 
selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = MinBytesRefAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java index 0dc008cb22396..1573efdd81059 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java @@ -21,7 +21,7 @@ public AbstractArrayState(BigArrays bigArrays) { this.bigArrays = bigArrays; } - final boolean hasValue(int groupId) { + boolean hasValue(int groupId) { return seen == null || seen.get(groupId); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java new file mode 100644 index 0000000000000..eb0a992c8610f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/BytesRefArrayState.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of BytesRefs. It is created in a mode where it + * won't track the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is a specialized version of the {@code X-ArrayState.java.st} template. + *
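+ * <p>
+ * A minimal usage sketch (illustrative only; the {@code bigArrays} and {@code breaker}
+ * instances and the breaker label below are assumptions, not part of this patch):
+ * <pre>{@code
+ * try (var state = new BytesRefArrayState(bigArrays, breaker, "example_label")) {
+ *     state.set(0, new BytesRef("a"));                        // caller assigns group ids
+ *     state.enableGroupIdTracking(new SeenGroupIds.Empty());  // once nulls appear in the input
+ *     boolean present = state.hasValue(1);                    // false: group 1 was never written
+ * }
+ * }</pre>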
+ */ +public final class BytesRefArrayState implements GroupingAggregatorState, Releasable { + private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private final String breakerLabel; + private ObjectArray values; + /** + * If false, no group id is expected to have nulls. + * If true, they may have nulls. + */ + private boolean groupIdTrackingEnabled; + + BytesRefArrayState(BigArrays bigArrays, CircuitBreaker breaker, String breakerLabel) { + this.bigArrays = bigArrays; + this.breaker = breaker; + this.breakerLabel = breakerLabel; + this.values = bigArrays.newObjectArray(0); + } + + BytesRef get(int groupId) { + return values.get(groupId).bytesRefView(); + } + + void set(int groupId, BytesRef value) { + ensureCapacity(groupId); + + var currentBuilder = values.get(groupId); + if (currentBuilder == null) { + currentBuilder = new BreakingBytesRefBuilder(breaker, breakerLabel, value.length); + values.set(groupId, currentBuilder); + } + + currentBuilder.copyBytes(value); + } + + Block toValuesBlock(IntVector selected, DriverContext driverContext) { + if (false == groupIdTrackingEnabled) { + try (var builder = driverContext.blockFactory().newBytesRefVectorBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + var value = get(group); + builder.appendBytesRef(value); + } + return builder.build().asBlock(); + } + } + try (var builder = driverContext.blockFactory().newBytesRefBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + var value = get(group); + builder.appendBytesRef(value); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + var minSize = groupId + 1; + if (minSize > values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, minSize); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 2; + try ( + var valuesBuilder = driverContext.blockFactory().newBytesRefVectorBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + var emptyBytesRef = new BytesRef(); + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + var value = get(group); + valuesBuilder.appendBytesRef(value); + } else { + valuesBuilder.appendBytesRef(emptyBytesRef); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + } + blocks[offset] = valuesBuilder.build().asBlock(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + } + } + + boolean hasValue(int groupId) { + return groupId < values.size() && values.get(groupId) != null; + } + + /** + * Switches this array state into tracking which group ids are set. This is + * idempotent and fast if already tracking so it's safe to, say, call it once + * for every block of values that arrives containing {@code null}. + * + *
+ * This class tracks seen group IDs differently from {@code AbstractArrayState}, as it just + * stores a flag to know if optimizations can be made. + *
+ */ + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + this.groupIdTrackingEnabled = true; + } + + @Override + public void close() { + for (int i = 0; i < values.size(); i++) { + Releasables.closeWhileHandlingException(values.get(i)); + } + + Releasables.close(values); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java new file mode 100644 index 0000000000000..144214f93571e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregator.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator for `Max`, that works with BytesRef values. + * Gets the biggest BytesRef value, based on its bytes natural order (Delegated to {@link BytesRef#compareTo}). 
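+ * <p>
+ * For example, the maximum of {@code "apple"} and {@code "Banana"} is {@code "apple"},
+ * because the raw UTF-8 bytes are compared and lowercase ASCII sorts after uppercase.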
+ */ +@Aggregator({ @IntermediateState(name = "max", type = "BYTES_REF"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MaxBytesRefAggregator { + private static boolean isBetter(BytesRef value, BytesRef otherValue) { + return value.compareTo(otherValue) > 0; + } + + public static SingleState initSingle(DriverContext driverContext) { + return new SingleState(driverContext.breaker()); + } + + public static void combine(SingleState state, BytesRef value) { + state.add(value); + } + + public static void combineIntermediate(SingleState state, BytesRef value, boolean seen) { + if (seen) { + combine(state, value); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext.bigArrays(), driverContext.breaker()); + } + + public static void combine(GroupingState state, int groupId, BytesRef value) { + state.add(groupId, value); + } + + public static void combineIntermediate(GroupingState state, int groupId, BytesRef value, boolean seen) { + if (seen) { + state.add(groupId, value); + } + } + + public static void combineStates(GroupingState state, int groupId, GroupingState otherState, int otherGroupId) { + state.combine(groupId, otherState, otherGroupId); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(selected, driverContext); + } + + public static class GroupingState implements Releasable { + private final BytesRefArrayState internalState; + + private GroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + this.internalState = new BytesRefArrayState(bigArrays, breaker, "max_bytes_ref_grouping_aggregator"); + } + + public void add(int groupId, BytesRef value) { + if (internalState.hasValue(groupId) == false || isBetter(value, internalState.get(groupId))) { + internalState.set(groupId, value); + } + } + + public void combine(int groupId, GroupingState otherState, int otherGroupId) { + if (otherState.internalState.hasValue(otherGroupId)) { + add(groupId, otherState.internalState.get(otherGroupId)); + } + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + internalState.toIntermediate(blocks, offset, selected, driverContext); + } + + Block toBlock(IntVector selected, DriverContext driverContext) { + return internalState.toValuesBlock(selected, driverContext); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + internalState.enableGroupIdTracking(seen); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } + + public static class SingleState implements Releasable { + private final BreakingBytesRefBuilder internalState; + private boolean seen; + + private SingleState(CircuitBreaker breaker) { + this.internalState = new BreakingBytesRefBuilder(breaker, "max_bytes_ref_aggregator"); + this.seen = false; + } + + public void add(BytesRef value) { + if (seen == false || isBetter(value, internalState.bytesRefView())) { + seen = true; + + internalState.grow(value.length); + internalState.setLength(value.length); + + System.arraycopy(value.bytes, value.offset, internalState.bytes(), 0, value.length); + } + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); 
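+ // The "seen" flag travels as a second block so that combineIntermediate can skip the value when it is false.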
+ blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + Block toBlock(DriverContext driverContext) { + if (seen == false) { + return driverContext.blockFactory().newConstantNullBlock(1); + } + + return driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java new file mode 100644 index 0000000000000..830900702a371 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregator.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator for `Min`, that works with BytesRef values. + * Gets the smallest BytesRef value, based on its bytes natural order (Delegated to {@link BytesRef#compareTo}). 
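+ * <p>
+ * For example, the minimum of {@code "apple"} and {@code "Banana"} is {@code "Banana"}
+ * under this byte order, since uppercase ASCII bytes sort before lowercase.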
+ */ +@Aggregator({ @IntermediateState(name = "min", type = "BYTES_REF"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MinBytesRefAggregator { + private static boolean isBetter(BytesRef value, BytesRef otherValue) { + return value.compareTo(otherValue) < 0; + } + + public static SingleState initSingle(DriverContext driverContext) { + return new SingleState(driverContext.breaker()); + } + + public static void combine(SingleState state, BytesRef value) { + state.add(value); + } + + public static void combineIntermediate(SingleState state, BytesRef value, boolean seen) { + if (seen) { + combine(state, value); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext.bigArrays(), driverContext.breaker()); + } + + public static void combine(GroupingState state, int groupId, BytesRef value) { + state.add(groupId, value); + } + + public static void combineIntermediate(GroupingState state, int groupId, BytesRef value, boolean seen) { + if (seen) { + state.add(groupId, value); + } + } + + public static void combineStates(GroupingState state, int groupId, GroupingState otherState, int otherGroupId) { + state.combine(groupId, otherState, otherGroupId); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(selected, driverContext); + } + + public static class GroupingState implements Releasable { + private final BytesRefArrayState internalState; + + private GroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + this.internalState = new BytesRefArrayState(bigArrays, breaker, "min_bytes_ref_grouping_aggregator"); + } + + public void add(int groupId, BytesRef value) { + if (internalState.hasValue(groupId) == false || isBetter(value, internalState.get(groupId))) { + internalState.set(groupId, value); + } + } + + public void combine(int groupId, GroupingState otherState, int otherGroupId) { + if (otherState.internalState.hasValue(otherGroupId)) { + add(groupId, otherState.internalState.get(otherGroupId)); + } + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + internalState.toIntermediate(blocks, offset, selected, driverContext); + } + + Block toBlock(IntVector selected, DriverContext driverContext) { + return internalState.toValuesBlock(selected, driverContext); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + internalState.enableGroupIdTracking(seen); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } + + public static class SingleState implements Releasable { + private final BreakingBytesRefBuilder internalState; + private boolean seen; + + private SingleState(CircuitBreaker breaker) { + this.internalState = new BreakingBytesRefBuilder(breaker, "min_bytes_ref_aggregator"); + this.seen = false; + } + + public void add(BytesRef value) { + if (seen == false || isBetter(value, internalState.bytesRefView())) { + seen = true; + + internalState.grow(value.length); + internalState.setLength(value.length); + + System.arraycopy(value.bytes, value.offset, internalState.bytes(), 0, value.length); + } + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); 
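+ // Mirrors the Max intermediate layout: a constant value block followed by a boolean "seen" block.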
+ blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + Block toBlock(DriverContext driverContext) { + if (seen == false) { + return driverContext.blockFactory().newConstantNullBlock(1); + } + + return driverContext.blockFactory().newConstantBytesRefBlockWith(internalState.bytesRefView(), 1); + } + + @Override + public void close() { + Releasables.close(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java index 17e67335919b1..2578452ad9062 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilder.java @@ -131,6 +131,14 @@ public void append(BytesRef bytes) { append(bytes.bytes, bytes.offset, bytes.length); } + /** + * Set the content of the builder to the given bytes. + */ + public void copyBytes(BytesRef newBytes) { + clear(); + append(newBytes); + } + /** * Reset the builder to an empty bytes array. Doesn't deallocate any memory. */ @@ -141,7 +149,7 @@ public void clear() { /** * Returns a view of the data added as a {@link BytesRef}. Importantly, this does not * copy the bytes and any further modification to the {@link BreakingBytesRefBuilder} - * will modify the returned {@link BytesRef}. The called must copy the bytes + * will modify the returned {@link BytesRef}. The caller must copy the bytes * if they wish to keep them. */ public BytesRef bytesRefView() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java new file mode 100644 index 0000000000000..adc891a6a977d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefAggregatorFunctionTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceBytesRefBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBytesRefAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBytesRefBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> new BytesRef(randomAlphaOfLengthBetween(0, 100))) + ); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of bytes"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Optional max = input.stream().flatMap(b -> allBytesRefs(b)).max(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(0), equalTo(true)); + return; + } + assertThat(result.isNull(0), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, 0), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..75a6a839ea62d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunctionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongBytesRefTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBytesRefGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongBytesRefTupleBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), new BytesRef(randomAlphaOfLengthBetween(0, 100)))) + ); + } + + @Override + protected DataType acceptedDataType() { + return DataType.IP; + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of bytes"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + Optional max = input.stream().flatMap(p -> allBytesRefs(p, group)).max(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(position), equalTo(true)); + return; + } + assertThat(result.isNull(position), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, position), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java new file mode 100644 index 0000000000000..b4383d6b0f56e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefAggregatorFunctionTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceBytesRefBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBytesRefAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBytesRefBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> new BytesRef(randomAlphaOfLengthBetween(0, 100))) + ); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of bytes"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Optional max = input.stream().flatMap(b -> allBytesRefs(b)).min(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(0), equalTo(true)); + return; + } + assertThat(result.isNull(0), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, 0), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..d4cfca819f3b7 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunctionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongBytesRefTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBytesRefGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongBytesRefTupleBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), new BytesRef(randomAlphaOfLengthBetween(0, 100)))) + ); + } + + @Override + protected DataType acceptedDataType() { + return DataType.IP; + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of bytes"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + Optional max = input.stream().flatMap(p -> allBytesRefs(p, group)).min(Comparator.naturalOrder()); + if (max.isEmpty()) { + assertThat(result.isNull(position), equalTo(true)); + return; + } + assertThat(result.isNull(position), equalTo(false)); + assertThat(BlockUtils.toJavaObject(result, position), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java index 24f5297a0d6fe..266c17febc5b3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java @@ -32,7 +32,7 @@ public void testBreakOnBuild() { public void testAddByte() { testAgainstOracle(() -> new TestIteration() { - byte b = randomByte(); + final byte b = randomByte(); @Override public int size() { @@ -53,7 +53,7 @@ public void applyToOracle(BytesRefBuilder oracle) { public void testAddBytesRef() { testAgainstOracle(() -> new TestIteration() { - BytesRef ref = new BytesRef(randomAlphaOfLengthBetween(1, 100)); + final BytesRef ref = new BytesRef(randomAlphaOfLengthBetween(1, 100)); @Override public int size() { @@ -72,10 +72,23 @@ public void applyToOracle(BytesRefBuilder oracle) { }); } + public void testCopyBytes() { + CircuitBreaker breaker = new MockBigArrays.LimitedBreaker(CircuitBreaker.REQUEST, ByteSizeValue.ofBytes(300)); + try (BreakingBytesRefBuilder builder = new BreakingBytesRefBuilder(breaker, "test")) { + String initialValue = randomAlphaOfLengthBetween(1, 50); + builder.copyBytes(new BytesRef(initialValue)); + assertThat(builder.bytesRefView().utf8ToString(), equalTo(initialValue)); + + String newValue = randomAlphaOfLengthBetween(350, 500); + Exception e = expectThrows(CircuitBreakingException.class, () -> 
builder.copyBytes(new BytesRef(newValue))); + assertThat(e.getMessage(), equalTo("over test limit")); + } + } + public void testGrow() { testAgainstOracle(() -> new TestIteration() { - int length = between(1, 100); - byte b = randomByte(); + final int length = between(1, 100); + final byte b = randomByte(); @Override public int size() { @@ -118,10 +131,11 @@ private void testAgainstOracle(Supplier iterations) { assertThat(builder.bytesRefView(), equalTo(oracle.get())); while (true) { TestIteration iteration = iterations.get(); - boolean willResize = builder.length() + iteration.size() >= builder.bytes().length; + int targetSize = builder.length() + iteration.size(); + boolean willResize = targetSize >= builder.bytes().length; if (willResize) { long resizeMemoryUsage = BreakingBytesRefBuilder.SHALLOW_SIZE + ramForArray(builder.bytes().length); - resizeMemoryUsage += ramForArray(ArrayUtil.oversize(builder.length() + iteration.size(), Byte.BYTES)); + resizeMemoryUsage += ramForArray(ArrayUtil.oversize(targetSize, Byte.BYTES)); if (resizeMemoryUsage > limit) { Exception e = expectThrows(CircuitBreakingException.class, () -> iteration.applyToBuilder(builder)); assertThat(e.getMessage(), equalTo("over test limit")); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 951545a546826..be3ab86d3e04f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -40,10 +40,10 @@ double e() "double log(?base:integer|unsigned_long|long|double, number:integer|unsigned_long|long|double)" "double log10(number:double|integer|long|unsigned_long)" "keyword|text ltrim(string:keyword|text)" -"boolean|double|integer|long|date|ip max(field:boolean|double|integer|long|date|ip)" +"boolean|double|integer|long|date|ip|keyword|text|long|version max(field:boolean|double|integer|long|date|ip|keyword|text|long|version)" "double median(number:double|integer|long)" "double median_absolute_deviation(number:double|integer|long)" -"boolean|double|integer|long|date|ip min(field:boolean|double|integer|long|date|ip)" +"boolean|double|integer|long|date|ip|keyword|text|long|version min(field:boolean|double|integer|long|date|ip|keyword|text|long|version)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_append(field1:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, field2:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "double mv_avg(number:double|integer|long|unsigned_long)" "keyword mv_concat(string:text|keyword, delim:text|keyword)" @@ -163,10 +163,10 @@ locate |[string, substring, start] |["keyword|text", "keyword|te log |[base, number] |["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["Base of logarithm. If `null`\, the function returns `null`. If not provided\, this function returns the natural logarithm (base e) of a value.", "Numeric expression. If `null`\, the function returns `null`."] log10 |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. ltrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. 
-max |field |"boolean|double|integer|long|date|ip" |[""] +max |field |"boolean|double|integer|long|date|ip|keyword|text|long|version" |[""] median |number |"double|integer|long" |[""] median_absolut|number |"double|integer|long" |[""] -min |field |"boolean|double|integer|long|date|ip" |[""] +min |field |"boolean|double|integer|long|date|ip|keyword|text|long|version" |[""] mv_append |[field1, field2] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version"] | ["", ""] mv_avg |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_concat |[string, delim] |["text|keyword", "text|keyword"] |[Multivalue expression., Delimiter.] @@ -411,10 +411,10 @@ locate |integer log |double |[true, false] |false |false log10 |double |false |false |false ltrim |"keyword|text" |false |false |false -max |"boolean|double|integer|long|date|ip" |false |false |true +max |"boolean|double|integer|long|date|ip|keyword|text|long|version" |false |false |true median |double |false |false |true median_absolut|double |false |false |true -min |"boolean|double|integer|long|date|ip" |false |false |true +min |"boolean|double|integer|long|date|ip|keyword|text|long|version" |false |false |true mv_append |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false] |false |false mv_avg |double |false |false |false mv_concat |keyword |[false, false] |false |false diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index eb373b6ddef6b..fc607edf4d212 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -76,6 +76,166 @@ fe82::cae2:65ff:fece:fec0 | fe82::cae2:65ff:fece:fec0 | fe82::cae2:65ff:fece:fec fe80::cae2:65ff:fece:feb9 | fe80::cae2:65ff:fece:feb9 | fe80::cae2:65ff:fece:feb9 | fe81::cae2:65ff:fece:feb9 | gamma ; +maxOfVersion +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats max(version), a = max(version), b = max(x), c = max(case(name == "iiiii", "100.0.0"::version, version)); + +max(version):version | a:version | b:version | c:version +bad | bad | bad | 100.0.0 +; + +maxOfVersionGrouping +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats max(version), a = max(version), b = max(x), c = max(case(name == "ccccc", "100.0.0"::version, version)) by name +| sort name asc +| limit 3; + +max(version):version | a:version | b:version | c:version | name:keyword +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.2.3.4 | aaaaa +2.3.4 | 2.3.4 | 2.3.4 | 100.0.0 | ccccc +2.12.0 | 2.12.0 | 2.12.0 | 2.12.0 | ddddd +; + +maxOfKeyword +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats max(abbrev), a = max(abbrev), b = max(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)); + +max(abbrev):keyword | a:keyword | b:keyword | c:keyword +ZAH | ZAH | ZAH | ___ +; + +maxOfKeywordGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats max(abbrev), a = max(abbrev), b = max(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)) by type +| sort type asc +| 
limit 4; + +max(abbrev):keyword | a:keyword | b:keyword | c:keyword | type:keyword +IXC | IXC | IXC | IXC | major +ZAH | ZAH | ZAH | ZAH | mid +VIBY | VIBY | VIBY | VIBY | military +OPQS | OPQS | OPQS | ___ | small +; + +maxOfText +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats max(name), a = max(name), b = max(x); + +max(name):text | a:text | b:text +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l +; + +maxOfTextGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats max(name), a = max(name), b = max(x) by type +| sort type asc +| limit 4; + +max(name):text | a:text | b:text | type:keyword +Cheongju Int'l | Cheongju Int'l | Cheongju Int'l | major +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | mid +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | military +Sahnewal | Sahnewal | Sahnewal | small +; + +minOfVersion +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats min(version), a = min(version), b = min(x), c = min(case(name == "iiiii", "1.0"::version, version)); + +min(version):version | a:version | b:version | c:version +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.0 +; + +minOfVersionGrouping +required_capability: agg_max_min_string_support +from apps +| eval x = version +| where id > 2 +| stats min(version), a = min(version), b = min(x), c = min(case(name == "ccccc", "100.0.0"::version, version)) by name +| sort name asc +| limit 3; + +min(version):version | a:version | b:version | c:version | name:keyword +1.2.3.4 | 1.2.3.4 | 1.2.3.4 | 1.2.3.4 | aaaaa +2.3.4 | 2.3.4 | 2.3.4 | 100.0.0 | ccccc +2.12.0 | 2.12.0 | 2.12.0 | 2.12.0 | ddddd +; + +minOfKeyword +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats min(abbrev), a = min(abbrev), b = min(x), c = max(case(mv_first(type) == "small", "___"::keyword, abbrev)); + +min(abbrev):keyword | a:keyword | b:keyword | c:keyword +AWZ | AWZ | AWZ | ___ +; + +minOfKeywordGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = abbrev +| where scalerank >= 9 +| stats min(abbrev), a = min(abbrev), b = min(x), c = min(case(mv_first(type) == "small", "___"::keyword, abbrev)) by type +| sort type asc +| limit 4; + +min(abbrev):keyword | a:keyword | b:keyword | c:keyword | type:keyword +CJJ | CJJ | CJJ | CJJ | major +AWZ | AWZ | AWZ | AWZ | mid +GWL | GWL | GWL | GWL | military +LUH | LUH | LUH | ___ | small +; + +minOfText +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats min(name), a = min(name), b = min(x); + +min(name):text | a:text | b:text +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh +; + +minOfTextGrouping +required_capability: agg_max_min_string_support +from airports +| eval x = name +| where scalerank >= 9 +| stats min(name), a = min(name), b = min(x) by type +| sort type asc +| limit 4; + +min(name):text | a:text | b:text | type:keyword +Chandigarh Int'l | Chandigarh Int'l | Chandigarh Int'l | major +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | mid +Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | military +Dhamial | Dhamial | Dhamial | small +; + minOfBooleanExpression required_capability: agg_max_min_boolean_support from employees diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 0477167cd7315..7937ae67c70bc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -67,6 +67,11 @@ public enum Cap { */ AGG_MAX_MIN_IP_SUPPORT, + /** + * Support for strings in aggregations {@code MAX} and {@code MIN}. + */ + AGG_MAX_MIN_STRING_SUPPORT, + /** * Support for booleans in {@code TOP} aggregation. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 22224628e23ad..e7f790f90803a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxBooleanAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MaxBytesRefAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIpAggregatorFunctionSupplier; @@ -32,12 +33,15 @@ import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; +import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -50,7 +54,13 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp tag = "docsStatsMaxNestedExpression" ) } ) - public Max(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date", "ip" }) Expression field) { + public Max( + Source source, + @Param( + name = "field", + type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + ) Expression field + ) { super(source, field); } @@ -77,13 +87,10 @@ public Max replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, sourceText(), DEFAULT, - "boolean", - "datetime", - "ip", - "numeric except unsigned_long or counter types" + "representable except unsigned_long and spatial types" ); } @@ -110,6 +117,9 @@ public final AggregatorFunctionSupplier supplier(List 
inputChannels) { if (type == DataType.IP) { return new MaxIpAggregatorFunctionSupplier(inputChannels); } + if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 8e7bb6bc3e799..6866811995059 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinBooleanAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MinBytesRefAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIpAggregatorFunctionSupplier; @@ -32,12 +33,15 @@ import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; +import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -50,7 +54,13 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp tag = "docsStatsMinNestedExpression" ) } ) - public Min(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date", "ip" }) Expression field) { + public Min( + Source source, + @Param( + name = "field", + type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + ) Expression field + ) { super(source, field); } @@ -77,13 +87,10 @@ public Min replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, sourceText(), DEFAULT, - "boolean", - "datetime", - "ip", - "numeric except unsigned_long or counter types" + "representable except unsigned_long and spatial types" ); } @@ -110,6 +117,9 @@ public final AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.IP) { return new MinIpAggregatorFunctionSupplier(inputChannels); } + if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + return new 
MinBytesRefAggregatorFunctionSupplier(inputChannels); + } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 213d7266a0b1e..60bf4be1d2b03 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -160,7 +160,7 @@ private static Stream, Tuple>> typeAndNames(Class if (NumericAggregate.class.isAssignableFrom(clazz)) { types = NUMERIC; } else if (Max.class.isAssignableFrom(clazz) || Min.class.isAssignableFrom(clazz)) { - types = List.of("Boolean", "Int", "Long", "Double", "Ip"); + types = List.of("Boolean", "Int", "Long", "Double", "Ip", "BytesRef"); } else if (clazz == Count.class) { types = List.of(""); // no extra type distinction } else if (SpatialAggregateFunction.class.isAssignableFrom(clazz)) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index f663002a51d68..3fb4b80d3974e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1809,13 +1809,13 @@ public void testUnsupportedTypesInStats() { found value [x] type [unsigned_long] line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long, _source, or counter types],\ found value [x] type [unsigned_long] - line 2:39: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:39: argument of [max(x)] must be [representable except unsigned_long and spatial types],\ found value [x] type [unsigned_long] line 2:47: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] - line 2:88: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:88: argument of [min(x)] must be [representable except unsigned_long and spatial types],\ found value [x] type [unsigned_long] line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\ found value [x] type [unsigned_long] @@ -1824,21 +1824,17 @@ public void testUnsupportedTypesInStats() { verifyUnsupported(""" row x = to_version("1.2") - | stats avg(x), max(x), median(x), median_absolute_deviation(x), min(x), percentile(x, 10), sum(x) + | stats avg(x), median(x), median_absolute_deviation(x), percentile(x, 10), sum(x) """, """ - Found 7 problems + Found 5 problems line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:18: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ + line 2:18: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ + line 2:29: argument of [median_absolute_deviation(x)] must be [numeric except 
unsigned_long or counter types],\ found value [x] type [version] - line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ - found value [x] type [version] - line 2:67: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [x] type [version] - line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] - line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); + line 2:59: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] + line 2:78: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); } public void testInOnText() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index bdea0807a78c4..e2403505921a9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -766,7 +766,7 @@ public void testAggregateOnCounter() { error("FROM tests | STATS min(network.bytes_in)", tsdb), equalTo( "1:20: argument of [min(network.bytes_in)] must be" - + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," + + " [representable except unsigned_long and spatial types]," + " found value [network.bytes_in] type [counter_long]" ) ); @@ -775,7 +775,7 @@ public void testAggregateOnCounter() { error("FROM tests | STATS max(network.bytes_in)", tsdb), equalTo( "1:20: argument of [max(network.bytes_in)] must be" - + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," + + " [representable except unsigned_long and spatial types]," + " found value [network.bytes_in] type [counter_long]" ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index 52e908a51dd1e..ce2bf7e262ae9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.versionfield.Version; import java.util.ArrayList; import java.util.Comparator; @@ -44,7 +45,10 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), - MultiRowTestCaseSupplier.ipCases(1, 1000) + MultiRowTestCaseSupplier.ipCases(1, 1000), + MultiRowTestCaseSupplier.versionCases(1, 1000), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) ).flatMap(List::stream).map(MaxTests::makeSupplier).collect(Collectors.toCollection(() -> 
suppliers)); suppliers.addAll( @@ -109,14 +113,44 @@ public static Iterable parameters() { DataType.IP, equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))) ) - ) + ), + new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.KEYWORD, "field")), + "Max[field=Attribute[channel=0]]", + DataType.KEYWORD, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.TEXT), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), + "Max[field=Attribute[channel=0]]", + DataType.TEXT, + equalTo(value) + ); + }), + new TestCaseSupplier(List.of(DataType.VERSION), () -> { + var value = randomBoolean() + ? new Version(randomAlphaOfLengthBetween(1, 10)).toBytesRef() + : new Version(randomIntBetween(0, 100) + "." + randomIntBetween(0, 100) + "." + randomIntBetween(0, 100)) + .toBytesRef(); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.VERSION, "field")), + "Max[field=Attribute[channel=0]]", + DataType.VERSION, + equalTo(value) + ); + }) ) ); return parameterSuppliersFromTypedDataWithDefaultChecks( suppliers, false, - (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types" + (v, p) -> "representable except unsigned_long and spatial types" ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 9514c817df497..7250072cd2003 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.versionfield.Version; import java.util.ArrayList; import java.util.Comparator; @@ -44,7 +45,10 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), - MultiRowTestCaseSupplier.ipCases(1, 1000) + MultiRowTestCaseSupplier.ipCases(1, 1000), + MultiRowTestCaseSupplier.versionCases(1, 1000), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) ).flatMap(List::stream).map(MinTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); suppliers.addAll( @@ -109,14 +113,44 @@ public static Iterable parameters() { DataType.IP, equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))) ) - ) + ), + new TestCaseSupplier(List.of(DataType.KEYWORD), () -> { + var value = new BytesRef(randomAlphaOfLengthBetween(0, 50)); + return new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.KEYWORD, "field")), + "Min[field=Attribute[channel=0]]", + 
DataType.KEYWORD,
+                        equalTo(value)
+                    );
+                }),
+                new TestCaseSupplier(List.of(DataType.TEXT), () -> {
+                    var value = new BytesRef(randomAlphaOfLengthBetween(0, 50));
+                    return new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")),
+                        "Min[field=Attribute[channel=0]]",
+                        DataType.TEXT,
+                        equalTo(value)
+                    );
+                }),
+                new TestCaseSupplier(List.of(DataType.VERSION), () -> {
+                    var value = randomBoolean()
+                        ? new Version(randomAlphaOfLengthBetween(1, 10)).toBytesRef()
+                        : new Version(randomIntBetween(0, 100) + "." + randomIntBetween(0, 100) + "." + randomIntBetween(0, 100))
+                            .toBytesRef();
+                    return new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.VERSION, "field")),
+                        "Min[field=Attribute[channel=0]]",
+                        DataType.VERSION,
+                        equalTo(value)
+                    );
+                })
             )
         );

         return parameterSuppliersFromTypedDataWithDefaultChecks(
             suppliers,
             false,
-            (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types"
+            (v, p) -> "representable except unsigned_long and spatial types"
         );
     }

From 65ce50c60a20918dc34183456c160eb7454a2479 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?=
Date: Tue, 20 Aug 2024 15:29:19 +0200
Subject: [PATCH 097/389] ESQL: Added mv_percentile function (#111749)

- Added the `mv_percentile(values, percentile)` function
- Used as a surrogate in the `percentile(column, percentile)` aggregation
- Updated docs to specify that the surrogate _should_ be implemented if
  possible

In the same way as mv_median, this yields exact results (ignoring
double-precision arithmetic error). For that, some decisions were made,
especially in the long evaluator (check the comments in context in
`MvPercentile.java`).

Closes https://github.com/elastic/elasticsearch/issues/111591
---
 docs/changelog/111749.yaml                    |   6 +
 .../description/mv_percentile.asciidoc        |   5 +
 .../functions/examples/mv_percentile.asciidoc |  13 +
 .../kibana/definition/mv_percentile.json      | 173 +++++++
 .../functions/kibana/docs/mv_percentile.md    |  11 +
 .../functions/layout/mv_percentile.asciidoc   |  15 +
 .../parameters/mv_percentile.asciidoc         |   9 +
 .../functions/signature/mv_percentile.svg     |   1 +
 .../functions/types/mv_percentile.asciidoc    |  17 +
 .../compute/data/BlockUtils.java              |   2 +
 .../src/main/resources/meta.csv-spec          |   6 +-
 .../src/main/resources/mv_percentile.csv-spec | 163 ++++++
 .../main/resources/stats_percentile.csv-spec  |  36 ++
 .../MvPercentileDoubleEvaluator.java          | 125 +++++
 .../MvPercentileIntegerEvaluator.java         | 126 +++++
 .../multivalue/MvPercentileLongEvaluator.java | 126 +++++
 .../xpack/esql/action/EsqlCapabilities.java   |   5 +
 .../function/EsqlFunctionRegistry.java        |   2 +
 .../function/aggregate/Percentile.java        |  16 +-
 .../function/aggregate/package-info.java      |  18 +-
 .../AbstractMultivalueFunction.java           |   1 +
 .../scalar/multivalue/MvPercentile.java       | 446 +++++++++++++++++
 .../function/AbstractAggregationTestCase.java |   7 +-
 .../AbstractScalarFunctionTestCase.java       |  27 +
 .../function/MultivalueTestCaseSupplier.java  | 325 ++++++++++++
 .../expression/function/TestCaseSupplier.java |   2 +-
 .../function/aggregate/PercentileTests.java   |   2 +-
 .../scalar/multivalue/MvPercentileTests.java  | 466 ++++++++++++++++++
 28 files changed, 2135 insertions(+), 16 deletions(-)
 create mode 100644 docs/changelog/111749.yaml
 create mode 100644 docs/reference/esql/functions/description/mv_percentile.asciidoc
 create mode 100644 docs/reference/esql/functions/examples/mv_percentile.asciidoc
 create mode 100644 
docs/reference/esql/functions/kibana/definition/mv_percentile.json create mode 100644 docs/reference/esql/functions/kibana/docs/mv_percentile.md create mode 100644 docs/reference/esql/functions/layout/mv_percentile.asciidoc create mode 100644 docs/reference/esql/functions/parameters/mv_percentile.asciidoc create mode 100644 docs/reference/esql/functions/signature/mv_percentile.svg create mode 100644 docs/reference/esql/functions/types/mv_percentile.asciidoc create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml new file mode 100644 index 0000000000000..77e0c65005dd6 --- /dev/null +++ b/docs/changelog/111749.yaml @@ -0,0 +1,6 @@ +pr: 111749 +summary: "ESQL: Added `mv_percentile` function" +area: ES|QL +type: feature +issues: + - 111591 diff --git a/docs/reference/esql/functions/description/mv_percentile.asciidoc b/docs/reference/esql/functions/description/mv_percentile.asciidoc new file mode 100644 index 0000000000000..3e731f6525cec --- /dev/null +++ b/docs/reference/esql/functions/description/mv_percentile.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. diff --git a/docs/reference/esql/functions/examples/mv_percentile.asciidoc b/docs/reference/esql/functions/examples/mv_percentile.asciidoc new file mode 100644 index 0000000000000..9b20a5bef5e0d --- /dev/null +++ b/docs/reference/esql/functions/examples/mv_percentile.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/mv_percentile.csv-spec[tag=example] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/mv_percentile.csv-spec[tag=example-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/mv_percentile.json b/docs/reference/esql/functions/kibana/definition/mv_percentile.json new file mode 100644 index 0000000000000..dad611122f0db --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mv_percentile.json @@ -0,0 +1,173 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "mv_percentile", + "description" : "Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "ROW values = [5, 5, 10, 12, 5000]\n| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values)" + ] +} diff --git a/docs/reference/esql/functions/kibana/docs/mv_percentile.md b/docs/reference/esql/functions/kibana/docs/mv_percentile.md new file mode 100644 index 0000000000000..560a0aefa1dc3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mv_percentile.md @@ -0,0 +1,11 @@ + + +### MV_PERCENTILE +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. + +``` +ROW values = [5, 5, 10, 12, 5000] +| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values) +``` diff --git a/docs/reference/esql/functions/layout/mv_percentile.asciidoc b/docs/reference/esql/functions/layout/mv_percentile.asciidoc new file mode 100644 index 0000000000000..a86c4a136b5cd --- /dev/null +++ b/docs/reference/esql/functions/layout/mv_percentile.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-mv_percentile]] +=== `MV_PERCENTILE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_percentile.svg[Embedded,opts=inline] + +include::../parameters/mv_percentile.asciidoc[] +include::../description/mv_percentile.asciidoc[] +include::../types/mv_percentile.asciidoc[] +include::../examples/mv_percentile.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/mv_percentile.asciidoc b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc new file mode 100644 index 0000000000000..57804185e191a --- /dev/null +++ b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +Multivalue expression. + +`percentile`:: +The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead. diff --git a/docs/reference/esql/functions/signature/mv_percentile.svg b/docs/reference/esql/functions/signature/mv_percentile.svg new file mode 100644 index 0000000000000..b4d623636572f --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_percentile.svg @@ -0,0 +1 @@ +MV_PERCENTILE(number,percentile) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/mv_percentile.asciidoc b/docs/reference/esql/functions/types/mv_percentile.asciidoc new file mode 100644 index 0000000000000..99a58b9c3d2e2 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_percentile.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | percentile | result +double | double | double +double | integer | double +double | long | double +integer | double | integer +integer | integer | integer +integer | long | integer +long | double | long +long | integer | long +long | long | long +|=== diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index a697a3f6c15fa..3df389135e9d3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -87,6 +87,8 @@ public static Block[] fromListRow(BlockFactory blockFactory, List row, i } else { wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_UNORDERD); } + } else if (isAscending(listVal) && random.nextBoolean()) { + wrapper.builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); } blocks[i] = wrapper.builder.build(); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index be3ab86d3e04f..f1f66a9cb990c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -54,6 +54,7 @@ double e() "boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(field:boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(number:double|integer|long|unsigned_long)" "boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(field:boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version)" +"double|integer|long mv_percentile(number:double|integer|long, percentile:double|integer|long)" "double mv_pseries_weighted_sum(number:double, p:double)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_slice(field:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, start:integer, ?end:integer)" "boolean|date|double|integer|ip|keyword|long|text|version mv_sort(field:boolean|date|double|integer|ip|keyword|long|text|version, ?order:keyword)" @@ -177,6 +178,7 @@ mv_last |field |"boolean|cartesian_point|car mv_max |field |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |Multivalue expression. mv_median |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_min |field |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |Multivalue expression. +mv_percentile |[number, percentile] |["double|integer|long", "double|integer|long"] |[Multivalue expression., The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead.] mv_pseries_wei|[number, p] |[double, double] |[Multivalue expression., It is a constant number that represents the 'p' parameter in the P-Series. It impacts every element's contribution to the weighted sum.] mv_slice |[field, start, end] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", integer, integer]|[Multivalue expression. If `null`\, the function returns `null`., Start position. 
If `null`\, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list., End position(included). Optional; if omitted\, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list.] mv_sort |[field, order] |["boolean|date|double|integer|ip|keyword|long|text|version", keyword] |[Multivalue expression. If `null`\, the function returns `null`., Sort order. The valid options are ASC and DESC\, the default is ASC.] @@ -300,6 +302,7 @@ mv_last |Converts a multivalue expression into a single valued column cont mv_max |Converts a multivalued expression into a single valued column containing the maximum value. mv_median |Converts a multivalued field into a single valued field containing the median value. mv_min |Converts a multivalued expression into a single valued column containing the minimum value. +mv_percentile |Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. mv_pseries_wei|Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum. mv_slice |Returns a subset of the multivalued field using the start and end index values. mv_sort |Sorts a multivalued field in lexicographical order. @@ -425,6 +428,7 @@ mv_last |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|g mv_max |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |false |false |false mv_median |"double|integer|long|unsigned_long" |false |false |false mv_min |"boolean|date|date_nanos|double|integer|ip|keyword|long|text|unsigned_long|version" |false |false |false +mv_percentile |"double|integer|long" |[false, false] |false |false mv_pseries_wei|"double" |[false, false] |false |false mv_slice |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false, true] |false |false mv_sort |"boolean|date|double|integer|ip|keyword|long|text|version" |[false, true] |false |false @@ -504,5 +508,5 @@ countFunctions#[skip:-8.15.99] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -114 | 114 | 114 +115 | 115 | 115 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec new file mode 100644 index 0000000000000..e22b40c7ecad8 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec @@ -0,0 +1,163 @@ +default +required_capability: fn_mv_percentile + +// tag::example[] +ROW values = [5, 5, 10, 12, 5000] +| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values) +// end::example[] +; + +// tag::example-result[] +values:integer | p50:integer | median:integer +[5, 5, 10, 12, 5000] | 10 | 10 +// end::example-result[] +; + +p0 +required_capability: fn_mv_percentile + +ROW a = [5, 5, 10, 12, 5000] +| EVAL pInt = MV_PERCENTILE(a, 0), pLong = MV_PERCENTILE(a, 0::long), pDouble = MV_PERCENTILE(a, 0.0) +| KEEP pInt, pLong, pDouble +; + +pInt:integer | pLong:integer | pDouble:integer +5 | 5 | 5 +; + +p100 +required_capability: fn_mv_percentile + +ROW a = [5, 5, 10, 12, 5000] +| EVAL pInt = MV_PERCENTILE(a, 100), pLong = MV_PERCENTILE(a, 100::long), pDouble = MV_PERCENTILE(a, 100.0) +| KEEP pInt, pLong, pDouble +; + 
+pInt:integer | pLong:integer | pDouble:integer +5000 | 5000 | 5000 +; + +fractionInt +required_capability: fn_mv_percentile + +ROW a = [0, 10] +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:integer | pLong:integer | pDouble:integer +7 | 7 | 7 +; + +fractionLong +required_capability: fn_mv_percentile + +ROW a = to_long([0, 10]) +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:long | pLong:long | pDouble:long +7 | 7 | 7 +; + +fractionDouble +required_capability: fn_mv_percentile + +ROW a = [0., 10.] +| EVAL pInt = MV_PERCENTILE(a, 75), pLong = MV_PERCENTILE(a, 75::long), pDouble = MV_PERCENTILE(a, 75.0) +| KEEP pInt, pLong, pDouble +; + +pInt:double | pLong:double | pDouble:double +7.5 | 7.5 | 7.5 +; + +singleValue +required_capability: fn_mv_percentile + +ROW integer = 5, long = 5::long, double = 5.0 +| EVAL + integer = MV_PERCENTILE(integer, 75), + long = MV_PERCENTILE(long, 75), + double = MV_PERCENTILE(double, 75) +; + +integer:integer | long:long | double:double +5 | 5 | 5 +; + +fromIndex +required_capability: fn_mv_percentile + +FROM employees +| EVAL + integer = MV_PERCENTILE(salary_change.int, 75), + long = MV_PERCENTILE(salary_change.long, 75), + double = MV_PERCENTILE(salary_change, 75) +| KEEP integer, long, double +| SORT double +| LIMIT 3 +; + +integer:integer | long:long | double:double +-8 | -8 | -8.46 +-7 | -7 | -7.08 +-6 | -6 | -6.9 +; + +fromIndexPercentile +required_capability: fn_mv_percentile + +FROM employees +| SORT emp_no +| LIMIT 1 +| EVAL + integer = MV_PERCENTILE(salary_change.int, languages), + long = MV_PERCENTILE(salary_change.long, languages.long), + double = MV_PERCENTILE(salary_change, height), + null_value = MV_PERCENTILE(salary_change, emp_no) +| KEEP integer, long, double, null_value +; +warning:Line 8:14: evaluation of [MV_PERCENTILE(salary_change, emp_no)] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 8:14: java.lang.IllegalArgumentException: Percentile parameter must be a number between 0 and 100, found [10001.0] + +integer:integer | long:long | double:double | null_value:double +1 | 1 | 1.19 | null +; + +multipleExpressions +required_capability: fn_mv_percentile + +ROW x = [0, 5, 10] +| EVAL + MV_PERCENTILE(x, 75), + a = MV_PERCENTILE(x, 75), + b = MV_PERCENTILE(TO_DOUBLE([0, 5, 10]), 75), + c = MV_PERCENTILE(CASE(true, x, [0, 1]), 75) +; + +x:integer | MV_PERCENTILE(x, 75):integer | a:integer | b:double | c:integer +[0, 5, 10] | 7 | 7 | 7.5 | 7 +; + +nullsAndFolds +required_capability: fn_mv_percentile + +ROW x = [5, 5, 10, 12, 5000], n = null, y = 50 +| EVAL evalNull = null / 2, evalValue = 31 + 1 +| LIMIT 1 +| EVAL + a = mv_percentile(y, 90), + b = mv_percentile(x, y), + c = mv_percentile(null, null), + d = mv_percentile(null, y), + e = mv_percentile(evalNull, y), + f = mv_percentile(evalValue, y), + g = mv_percentile(n, y) +| KEEP a, b, c, d, e, f, g +; + +a:integer | b:integer | c:null | d:null | e:integer | f:integer | g:null +50 | 10 | null | null | null | 32 | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec index db386e877b9c3..2ac7a0cf6217a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec @@ -195,3 +195,39 @@ p80_max_salary_change:double 12.132 // end::docsStatsPercentileNestedExpression-result[] ; + +constantsFrom +required_capability: fn_mv_percentile +from employees +| eval single = 7, mv = [1, 7, 10] +| stats + eval_single = percentile(single, 50), + eval_mv = percentile(mv, 50), + constant_single = percentile(5, 50), + constant_mv = percentile([1, 5, 10], 50); + +eval_single:double | eval_mv:double | constant_single:double | constant_mv:double +7 | 7 | 5 | 5 +; + +constantsRow +required_capability: fn_mv_percentile +row single=7, mv=[1, 7, 10] +| stats + eval_single = percentile(single, 50), + eval_mv = percentile(mv, 50), + constant_single = percentile(5, 50), + constant_mv = percentile([1, 5, 10], 50); + +eval_single:double | eval_mv:double | constant_single:double | constant_mv:double +7 | 7 | 5 | 5 +; + +singleConstant +required_capability: fn_mv_percentile +row a=0 +| stats constant_single = percentile(5, 50); + +constant_single:double +5 +; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java new file mode 100644 index 0000000000000..dd370e90b2c86 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileDoubleEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
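+
+// Descriptive note on the generated class below: this evaluator adapts
+// MvPercentile.process to block-at-a-time execution. For each position it
+// null-checks the values block, requires a single-valued percentile, and
+// turns an IllegalArgumentException thrown by the process method into a
+// registered warning plus a null result.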
+package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. + */ +public final class MvPercentileDoubleEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.DoubleSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.DoubleSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (DoubleBlock valuesBlock = (DoubleBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public DoubleBlock eval(int positionCount, DoubleBlock valuesBlock, DoubleBlock percentileBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvPercentileDoubleEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + 
Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileDoubleEvaluator get(DriverContext context) { + return new MvPercentileDoubleEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileDoubleEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java new file mode 100644 index 0000000000000..93dda414c7b33 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileIntegerEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. 
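+ * <p>
+ * The percentile argument always arrives here as a double block, because
+ * MvPercentile#toEvaluator casts integer and long percentiles to double before
+ * building the evaluator; out-of-range percentiles surface as a warning and a
+ * null result rather than an error.
+ * </p>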
+ */ +public final class MvPercentileIntegerEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.IntSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileIntegerEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.IntSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (IntBlock valuesBlock = (IntBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public IntBlock eval(int positionCount, IntBlock valuesBlock, DoubleBlock percentileBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvPercentileIntegerEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileIntegerEvaluator get(DriverContext context) { + return new MvPercentileIntegerEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileIntegerEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java new file 
mode 100644 index 0000000000000..10d0b7c3283b2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileLongEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvPercentile}. + * This class is generated. Do not edit it. + */ +public final class MvPercentileLongEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator values; + + private final EvalOperator.ExpressionEvaluator percentile; + + private final MvPercentile.LongSortingScratch scratch; + + private final DriverContext driverContext; + + public MvPercentileLongEvaluator(Source source, EvalOperator.ExpressionEvaluator values, + EvalOperator.ExpressionEvaluator percentile, MvPercentile.LongSortingScratch scratch, + DriverContext driverContext) { + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock valuesBlock = (LongBlock) values.eval(page)) { + try (DoubleBlock percentileBlock = (DoubleBlock) percentile.eval(page)) { + return eval(page.getPositionCount(), valuesBlock, percentileBlock); + } + } + } + + public LongBlock eval(int positionCount, LongBlock valuesBlock, DoubleBlock percentileBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + boolean allBlocksAreNulls = true; + if (!valuesBlock.isNull(p)) { + allBlocksAreNulls = false; + } + if (percentileBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (percentileBlock.getValueCount(p) != 1) { + if (percentileBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (allBlocksAreNulls) { + result.appendNull(); + continue position; + } + try { + MvPercentile.process(result, p, valuesBlock, percentileBlock.getDouble(percentileBlock.getFirstValueIndex(p)), this.scratch); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "MvPercentileLongEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + + @Override + public 
void close() { + Releasables.closeExpectNoException(values, percentile); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory values; + + private final EvalOperator.ExpressionEvaluator.Factory percentile; + + private final Function scratch; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory values, + EvalOperator.ExpressionEvaluator.Factory percentile, + Function scratch) { + this.source = source; + this.values = values; + this.percentile = percentile; + this.scratch = scratch; + } + + @Override + public MvPercentileLongEvaluator get(DriverContext context) { + return new MvPercentileLongEvaluator(source, values.get(context), percentile.get(context), scratch.apply(context), context); + } + + @Override + public String toString() { + return "MvPercentileLongEvaluator[" + "values=" + values + ", percentile=" + percentile + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 7937ae67c70bc..913eb382a5daf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -37,6 +37,11 @@ public enum Cap { */ FN_MV_APPEND, + /** + * Support for {@code MV_PERCENTILE} function. + */ + FN_MV_PERCENTILE, + /** * Support for function {@code IP_PREFIX}. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 6e23f4445b564..c64cbdbd2a9ed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -96,6 +96,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSum; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPercentile; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSlice; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSort; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; @@ -362,6 +363,7 @@ private FunctionDefinition[][] functions() { def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), + def(MvPercentile.class, MvPercentile::new, "mv_percentile"), def(MvPSeriesWeightedSum.class, MvPSeriesWeightedSum::new, "mv_pseries_weighted_sum"), def(MvSort.class, MvSort::new, "mv_sort"), def(MvSlice.class, MvSlice::new, "mv_slice"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java index 54cebc7daad5d..0d5dd4b66501c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java
@@ -18,9 +18,12 @@
 import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.SurrogateExpression;
 import org.elasticsearch.xpack.esql.expression.function.Example;
 import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
 import org.elasticsearch.xpack.esql.expression.function.Param;
+import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble;
+import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPercentile;
 import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;

 import java.io.IOException;
@@ -31,7 +34,7 @@
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable;
 import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;

-public class Percentile extends NumericAggregate {
+public class Percentile extends NumericAggregate implements SurrogateExpression {
     public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
         Expression.class,
         "Percentile",
@@ -152,4 +155,15 @@ protected AggregatorFunctionSupplier doubleSupplier(List<Integer> inputChannels)
     private int percentileValue() {
         return ((Number) percentile.fold()).intValue();
     }
+
+    @Override
+    public Expression surrogate() {
+        var field = field();
+
+        if (field.foldable()) {
+            return new MvPercentile(source(), new ToDouble(source(), field), percentile());
+        }
+
+        return null;
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java
index 055e34ad5a633..1c10c7d2fa9ef 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java
@@ -68,18 +68,18 @@
 *         {@code dataType}: This will return the datatype of your function.
 *         May be based on its current parameters.
 *     </li>
- * </ul>
- *
- * Finally, you may want to implement some interfaces.
- * Check their JavaDocs to see if they are suitable for your function:
- * <ul>
- *     <li>
- *         {@link org.elasticsearch.xpack.esql.planner.ToAggregator}: (More information about aggregators below)
- *     </li>
- *     <li>
- *         {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression}
+ *     <li>
+ *         Implement {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression}, and its required
+ *         {@link org.elasticsearch.xpack.esql.expression.SurrogateExpression#surrogate()} method.
+ *         <p>
+ *             It's used to be able to fold the aggregation when it receives only literals,
+ *             or when the aggregation can be simplified.
+ *         </p>
 *     </li>
+ *
+ * Finally, implement {@link org.elasticsearch.xpack.esql.planner.ToAggregator} (More information about aggregators below).
+ * The only case when this interface is not required is when it always returns another function in its surrogate.
 *
 *     <li>
 *         To introduce your aggregation to the engine:
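To make the surrogate described above concrete: when the aggregated expression is foldable, `Percentile.surrogate()` rewrites the aggregation into the scalar `MV_PERCENTILE` over a double-cast input. A minimal sketch of the equivalence in ES|QL terms (illustrative only; the rewrite happens inside the logical optimizer, not in the query text):

ROW a = 0
| STATS constant_mv = PERCENTILE([1, 5, 10], 50)

behaves like

ROW a = 0
| EVAL constant_mv = MV_PERCENTILE(TO_DOUBLE([1, 5, 10]), 50)
| KEEP constant_mv

Both return a single double value, 5, matching the `constantsRow` and `singleConstant` csv-spec tests in this patch.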
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
index 90810d282ca52..cb0f9fdd8d5db 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java
@@ -44,6 +44,7 @@ public static List<NamedWriteableRegistry.Entry> getNamedWriteables() {
             MvMax.ENTRY,
             MvMedian.ENTRY,
             MvMin.ENTRY,
+            MvPercentile.ENTRY,
             MvPSeriesWeightedSum.ENTRY,
             MvSlice.ENTRY,
             MvSort.ENTRY,
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java
new file mode 100644
index 0000000000000..b1e710b9b2a40
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java
@@ -0,0 +1,446 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.compute.ann.Fixed;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.Example;
+import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
+import org.elasticsearch.xpack.esql.expression.function.Param;
+import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast;
+import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
+import org.elasticsearch.xpack.esql.planner.PlannerUtils;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+
+import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST;
+import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND;
+import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType;
+import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE;
+import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG;
+
+public class 
MvPercentile extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "MvPercentile", + MvPercentile::new + ); + + /** + * 2^52 is the smallest integer where it and all smaller integers can be represented exactly as double + */ + private static final double MAX_SAFE_LONG_DOUBLE = Double.longBitsToDouble(0x4330000000000000L); + + private final Expression field; + private final Expression percentile; + + @FunctionInfo( + returnType = { "double", "integer", "long" }, + description = "Converts a multivalued field into a single valued field containing " + + "the value at which a certain percentage of observed values occur.", + examples = @Example(file = "mv_percentile", tag = "example") + ) + public MvPercentile( + Source source, + @Param(name = "number", type = { "double", "integer", "long" }, description = "Multivalue expression.") Expression field, + @Param( + name = "percentile", + type = { "double", "integer", "long" }, + description = "The percentile to calculate. Must be a number between 0 and 100. " + + "Numbers out of range will return a null instead." + ) Expression percentile + ) { + super(source, List.of(field, percentile)); + this.field = field; + this.percentile = percentile; + } + + private MvPercentile(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field); + out.writeNamedWriteable(percentile); + } + + @Override + protected Expression.TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + return isType(field, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), FIRST, "numeric except unsigned_long").and( + isType(percentile, dt -> dt.isNumeric() && dt != UNSIGNED_LONG, sourceText(), SECOND, "numeric except unsigned_long") + ); + } + + @Override + public boolean foldable() { + return field.foldable() && percentile.foldable(); + } + + public final Expression field() { + return field; + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + public final ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { + var fieldEval = toEvaluator.apply(field); + var percentileEval = Cast.cast(source(), percentile.dataType(), DOUBLE, toEvaluator.apply(percentile)); + + return switch (PlannerUtils.toElementType(field.dataType())) { + case INT -> new MvPercentileIntegerEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new IntSortingScratch()); + case LONG -> new MvPercentileLongEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new LongSortingScratch()); + case DOUBLE -> new MvPercentileDoubleEvaluator.Factory(source(), fieldEval, percentileEval, (d) -> new DoubleSortingScratch()); + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvPercentile(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvPercentile::new, field, percentile); + } + + static class DoubleSortingScratch { + private static final double[] EMPTY = new double[0]; + + public 
double[] values = EMPTY; + } + + static class IntSortingScratch { + private static final int[] EMPTY = new int[0]; + + public int[] values = EMPTY; + } + + static class LongSortingScratch { + private static final long[] EMPTY = new long[0]; + + public long[] values = EMPTY; + } + + // Evaluators + + @Evaluator(extraName = "Double", warnExceptions = IllegalArgumentException.class) + static void process( + DoubleBlock.Builder builder, + int position, + DoubleBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) DoubleSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendDouble(calculateDoublePercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + @Evaluator(extraName = "Integer", warnExceptions = IllegalArgumentException.class) + static void process( + IntBlock.Builder builder, + int position, + IntBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) IntSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendInt(calculateIntPercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + @Evaluator(extraName = "Long", warnExceptions = IllegalArgumentException.class) + static void process( + LongBlock.Builder builder, + int position, + LongBlock values, + double percentile, + @Fixed(includeInToString = false, build = true) LongSortingScratch scratch + ) { + int valueCount = values.getValueCount(position); + int firstValueIndex = values.getFirstValueIndex(position); + + if (valueCount == 0) { + builder.appendNull(); + return; + } + + if (percentile < 0 || percentile > 100) { + throw new IllegalArgumentException("Percentile parameter must be a number between 0 and 100, found [" + percentile + "]"); + } + + builder.appendLong(calculateLongPercentile(values, firstValueIndex, valueCount, percentile, scratch)); + } + + // Percentile calculators + + private static double calculateDoublePercentile( + DoubleBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + DoubleSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getDouble(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return valuesBlock.getDouble(0); + } else if (percentile == 100) { + return valuesBlock.getDouble(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateDoublePercentile(fraction, valuesBlock.getDouble(lowerIndex), valuesBlock.getDouble(upperIndex)); + } + } + + if (percentile == 0) { + double min = Double.POSITIVE_INFINITY; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getDouble(firstValueIndex + i)); + } + return min; + } 
else if (percentile == 100) { + double max = Double.NEGATIVE_INFINITY; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, valuesBlock.getDouble(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new double[ArrayUtil.oversize(valueCount, Double.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getDouble(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateDoublePercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); + } + + private static int calculateIntPercentile( + IntBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + IntSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getInt(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return valuesBlock.getInt(0); + } else if (percentile == 100) { + return valuesBlock.getInt(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + var lowerValue = valuesBlock.getInt(lowerIndex); + var upperValue = valuesBlock.getInt(upperIndex); + var difference = (long) upperValue - lowerValue; + return lowerValue + (int) (fraction * difference); + } + } + + if (percentile == 0) { + int min = Integer.MAX_VALUE; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getInt(firstValueIndex + i)); + } + return min; + } else if (percentile == 100) { + int max = Integer.MIN_VALUE; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, valuesBlock.getInt(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new int[ArrayUtil.oversize(valueCount, Integer.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getInt(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + var lowerValue = scratch.values[lowerIndex]; + var upperValue = scratch.values[upperIndex]; + var difference = (long) upperValue - lowerValue; + return lowerValue + (int) (fraction * difference); + } + + private static long calculateLongPercentile( + LongBlock valuesBlock, + int firstValueIndex, + int valueCount, + double percentile, + LongSortingScratch scratch + ) { + if (valueCount == 1) { + return valuesBlock.getLong(firstValueIndex); + } + + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (valuesBlock.mvSortedAscending()) { + if (percentile == 0) { + return valuesBlock.getLong(0); + } else if (percentile == 100) { + return valuesBlock.getLong(valueCount - 1); + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateLongPercentile(fraction, valuesBlock.getLong(lowerIndex), valuesBlock.getLong(upperIndex)); + } + } + + if (percentile == 0) { + long min = Long.MAX_VALUE; + for (int i = 0; i < valueCount; i++) { + min = Math.min(min, valuesBlock.getLong(firstValueIndex + i)); + } + return min; + } else if (percentile == 100) { + long max = Long.MIN_VALUE; + for (int i = 0; i < valueCount; i++) { + max = Math.max(max, 
valuesBlock.getLong(firstValueIndex + i)); + } + return max; + } + + if (scratch.values.length < valueCount) { + scratch.values = new long[ArrayUtil.oversize(valueCount, Long.BYTES)]; + } + + for (int i = 0; i < valueCount; i++) { + scratch.values[i] = valuesBlock.getLong(firstValueIndex + i); + } + + Arrays.sort(scratch.values, 0, valueCount); + + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculateLongPercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); + } + + /** + * Calculates a percentile for a long avoiding overflows and double precision issues. + *

    + * To do that, if the values are over the limit of the representable double integers, + * it uses instead BigDecimals for the calculations. + *

    + */ + private static long calculateLongPercentile(double fraction, long lowerValue, long upperValue) { + if (upperValue < MAX_SAFE_LONG_DOUBLE && lowerValue > -MAX_SAFE_LONG_DOUBLE) { + var difference = upperValue - lowerValue; + return lowerValue + (long) (fraction * difference); + } + + var lowerValueBigDecimal = new BigDecimal(lowerValue); + var upperValueBigDecimal = new BigDecimal(upperValue); + var difference = upperValueBigDecimal.subtract(lowerValueBigDecimal); + var fractionBigDecimal = new BigDecimal(fraction); + return lowerValueBigDecimal.add(fractionBigDecimal.multiply(difference)).longValue(); + } + + /** + * Calculates a percentile for a double avoiding overflows. + *

    + * If the values are too separated (negative + positive), it uses a slightly different approach. + * This approach would fail if the values are big but not separated, so it's only used in this case. + *

    + */ + private static double calculateDoublePercentile(double fraction, double lowerValue, double upperValue) { + if (lowerValue < 0 && upperValue > 0) { + // Order is required to avoid `upper - lower` overflows + return (lowerValue + fraction * upperValue) - fraction * lowerValue; + } + + var difference = upperValue - lowerValue; + return lowerValue + fraction * difference; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 65425486ea4e0..f3c87e0e9d1d7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -301,14 +301,15 @@ private void resolveExpression(Expression expression, Consumer onAgg } expression = resolveSurrogates(expression); + // As expressions may be composed of multiple functions, we need to fold nulls bottom-up + expression = expression.transformUp(e -> new FoldNull().rule(e)); + assertThat(expression.dataType(), equalTo(testCase.expectedType())); + Expression.TypeResolution resolution = expression.typeResolved(); if (resolution.unresolved()) { throw new AssertionError("expected resolved " + resolution.message()); } - expression = new FoldNull().rule(expression); - assertThat(expression.dataType(), equalTo(testCase.expectedType())); - assumeTrue( "Surrogate expression with non-trivial children cannot be evaluated", expression.children() diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 66b587f257e2e..3ef2a7f821457 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -74,6 +74,30 @@ protected static Iterable parameterSuppliersFromTypedDataWithDefaultCh ); } + /** + * Converts a list of test cases into a list of parameter suppliers. + * Also, adds a default set of extra test cases. + *

    + * Use if possible, as this method may get updated with new checks in the future. + *

    + * + * @param nullsExpectedType See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)} + * @param evaluatorToString See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)} + */ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( + ExpectedType nullsExpectedType, + ExpectedEvaluatorToString evaluatorToString, + List suppliers, + PositionalErrorMessageSupplier positionalErrorMessageSupplier + ) { + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples( + anyNullIsNull(randomizeBytesRefsOffset(suppliers), nullsExpectedType, evaluatorToString), + positionalErrorMessageSupplier + ) + ); + } + public final void testEvaluate() { assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); boolean readFloating = randomBoolean(); @@ -97,6 +121,7 @@ public final void testEvaluate() { Object result; try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { try (Block block = evaluator.eval(row(testCase.getDataValues()))) { + assertThat(block.getPositionCount(), is(1)); result = toJavaObjectUnsignedLongAware(block, 0); } } @@ -217,6 +242,7 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con ExpressionEvaluator eval = evaluator(expression).get(context); Block block = eval.eval(new Page(positions, manyPositionsBlocks)) ) { + assertThat(block.getPositionCount(), is(positions)); for (int p = 0; p < positions; p++) { if (nullPositions.contains(p)) { assertThat(toJavaObject(block, p), allNullsMatcher()); @@ -260,6 +286,7 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { for (int c = 0; c < count; c++) { try (Block block = eval.eval(page)) { + assertThat(block.getPositionCount(), is(1)); assertThat(toJavaObjectUnsignedLongAware(block, 0), testCase.getMatcher()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java new file mode 100644 index 0000000000000..01c73e9ef0482 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultivalueTestCaseSupplier.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomList; +import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.TypedDataSupplier; + +/** + * Extension of {@link TestCaseSupplier} that provided multivalue test cases. 
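+ * Each case produces between {@code MIN_VALUES} (1) and {@code MAX_VALUES} (1000) values, with a
+ * case for every {@code Block.MvOrdering}.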
+ */ +public final class MultivalueTestCaseSupplier { + + private static final int MIN_VALUES = 1; + private static final int MAX_VALUES = 1000; + + private MultivalueTestCaseSupplier() {} + + public static List intCases(int min, int max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0 <= max && 0 >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0), ordering), + DataType.INTEGER + ) + ); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.INTEGER + ) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " ints>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.INTEGER + ) + ); + } + + int lower = Math.max(min, 1); + int upper = Math.min(max, Integer.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomIntBetween(lower, upper)), ordering), + DataType.INTEGER + ) + ); + } + + int lower1 = Math.max(min, Integer.MIN_VALUE); + int upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomIntBetween(lower1, upper1)), ordering), + DataType.INTEGER + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier("", () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomIntBetween(min, max); + } + return randomBoolean() ? 
ESTestCase.randomIntBetween(min, -1) : ESTestCase.randomIntBetween(1, max); + }), ordering), DataType.INTEGER) + ); + } + } + + return cases; + } + + public static List longCases(long min, long max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0 <= max && 0 >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0L), ordering), + DataType.LONG + ) + ); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.LONG + ) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " longs>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.LONG + ) + ); + } + + long lower = Math.max(min, 1); + long upper = Math.min(max, Long.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomLongBetween(lower, upper)), ordering), + DataType.LONG + ) + ); + } + + long lower1 = Math.max(min, Long.MIN_VALUE); + long upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomLongBetween(lower1, upper1)), ordering), + DataType.LONG + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier("", () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomLongBetween(min, max); + } + return randomBoolean() ? 
ESTestCase.randomLongBetween(min, -1) : ESTestCase.randomLongBetween(1, max); + }), ordering), DataType.LONG) + ); + } + } + + return cases; + } + + public static List doubleCases(double min, double max, boolean includeZero) { + List cases = new ArrayList<>(); + + for (Block.MvOrdering ordering : Block.MvOrdering.values()) { + if (0d <= max && 0d >= min && includeZero) { + cases.add( + new TypedDataSupplier( + "<0 mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> 0d), ordering), + DataType.DOUBLE + ) + ); + cases.add( + new TypedDataSupplier( + "<-0 mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> -0d), ordering), + DataType.DOUBLE + ) + ); + } + + if (max != 0d) { + cases.add( + new TypedDataSupplier( + "<" + max + " mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> max), ordering), + DataType.DOUBLE + ) + ); + } + + if (min != 0d && min != max) { + cases.add( + new TypedDataSupplier( + "<" + min + " mv " + ordering + " doubles>", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> min), ordering), + DataType.DOUBLE + ) + ); + } + + double lower1 = Math.max(min, 0d); + double upper1 = Math.min(max, 1d); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower2 = Math.max(min, -1d); + double upper2 = Math.min(max, 0d); + if (lower2 < upper2) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower3 = Math.max(min, 1d); + double upper3 = Math.min(max, Double.MAX_VALUE); + if (lower3 < upper3) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + double lower4 = Math.max(min, -Double.MAX_VALUE); + double upper4 = Math.min(max, -1d); + if (lower4 < upper4) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder( + randomList(MIN_VALUES, MAX_VALUES, () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)), + ordering + ), + DataType.DOUBLE + ) + ); + } + + if (min < 0 && max > 0) { + cases.add( + new TypedDataSupplier( + "", + () -> putInOrder(randomList(MIN_VALUES, MAX_VALUES, () -> { + if (includeZero) { + return ESTestCase.randomDoubleBetween(min, max, true); + } + return randomBoolean() + ? 
ESTestCase.randomDoubleBetween(min, -1, true) + : ESTestCase.randomDoubleBetween(1, max, true); + }), ordering), + DataType.DOUBLE + ) + ); + } + } + + return cases; + } + + private static > List putInOrder(List mvData, Block.MvOrdering ordering) { + switch (ordering) { + case UNORDERED -> { + } + case DEDUPLICATED_UNORDERD -> { + var dedup = new LinkedHashSet<>(mvData); + mvData.clear(); + mvData.addAll(dedup); + } + case DEDUPLICATED_AND_SORTED_ASCENDING -> { + var dedup = new HashSet<>(mvData); + mvData.clear(); + mvData.addAll(dedup); + Collections.sort(mvData); + } + case SORTED_ASCENDING -> { + Collections.sort(mvData); + } + default -> throw new UnsupportedOperationException("unsupported ordering [" + ordering + "]"); + } + + return mvData; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 5ef71e7ae30fb..a1caa784c9787 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1289,7 +1289,7 @@ private static String castToUnsignedLongEvaluator(String original, DataType curr throw new UnsupportedOperationException(); } - private static String castToDoubleEvaluator(String original, DataType current) { + public static String castToDoubleEvaluator(String original, DataType current) { if (current == DataType.DOUBLE) { return original; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java index 5271431bd43b8..be11515876966 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileTests.java @@ -52,7 +52,7 @@ public static Iterable parameters() { } } - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers, false, (v, p) -> "numeric except unsigned_long"); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java new file mode 100644 index 0000000000000..3410b95458302 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java @@ -0,0 +1,466 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultivalueTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; +import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class MvPercentileTests extends AbstractScalarFunctionTestCase { + public MvPercentileTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + + var fieldSuppliers = Stream.of( + MultivalueTestCaseSupplier.intCases(Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultivalueTestCaseSupplier.longCases(Long.MIN_VALUE, Long.MAX_VALUE, true), + MultivalueTestCaseSupplier.doubleCases(-Double.MAX_VALUE, Double.MAX_VALUE, true) + ).flatMap(List::stream).toList(); + + var percentileSuppliers = Stream.of( + TestCaseSupplier.intCases(0, 100, true), + TestCaseSupplier.longCases(0, 100, true), + TestCaseSupplier.doubleCases(0, 100, true) + ).flatMap(List::stream).toList(); + + for (var fieldSupplier : fieldSuppliers) { + for (var percentileSupplier : percentileSuppliers) { + cases.add(makeSupplier(fieldSupplier, percentileSupplier)); + } + } + + for (var percentileType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + cases.addAll( + List.of( + // Doubles + new TestCaseSupplier( + "median double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(5.) + ) + ), + new TestCaseSupplier( + "single value double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(55.), DOUBLE, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(55.) + ) + ), + new TestCaseSupplier( + "p0 double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(-10.) 
+ ) + ), + new TestCaseSupplier( + "p100 double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(10.) + ) + ), + new TestCaseSupplier( + "averaged double", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10., 5., 10.), DOUBLE, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + equalTo(7.5) + ) + ), + new TestCaseSupplier( + "big double difference", + List.of(DOUBLE, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-Double.MAX_VALUE, Double.MAX_VALUE), DOUBLE, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(DOUBLE, percentileType), + DOUBLE, + closeTo(0, 0.0000001) + ) + ), + + // Int + new TestCaseSupplier( + "median int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(5) + ) + ), + new TestCaseSupplier( + "single value int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(55), INTEGER, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(55) + ) + ), + new TestCaseSupplier( + "p0 int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(-10) + ) + ), + new TestCaseSupplier( + "p100 int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(10) + ) + ), + new TestCaseSupplier( + "averaged int", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10, 5, 10), INTEGER, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(7) + ) + ), + new TestCaseSupplier( + "big int difference", + List.of(INTEGER, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(Integer.MIN_VALUE, Integer.MAX_VALUE), INTEGER, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(INTEGER, percentileType), + INTEGER, + equalTo(-1) // Negative max is 1 smaller than positive max + ) + ), + + // Long + new TestCaseSupplier( + "median long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(5L) + ) + ), + new TestCaseSupplier( + "single value long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new 
TestCaseSupplier.TypedData(List.of(55L), LONG, "field"), + percentileWithType(randomIntBetween(0, 100), percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(55L) + ) + ), + new TestCaseSupplier( + "p0 long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(0, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(-10L) + ) + ), + new TestCaseSupplier( + "p100 long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(100, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(10L) + ) + ), + new TestCaseSupplier( + "averaged long", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-10L, 5L, 10L), LONG, "field"), + percentileWithType(75, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(7L) + ) + ), + new TestCaseSupplier( + "big long difference", + List.of(LONG, percentileType), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(Long.MIN_VALUE, Long.MAX_VALUE), LONG, "field"), + percentileWithType(50, percentileType) + ), + evaluatorString(LONG, percentileType), + LONG, + equalTo(0L) + ) + ) + ) + ); + + for (var fieldType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + cases.add( + new TestCaseSupplier( + "out of bounds percentile <" + fieldType + ", " + percentileType + ">", + List.of(fieldType, percentileType), + () -> { + var percentile = numberWithType( + randomBoolean() ? randomIntBetween(Integer.MIN_VALUE, -1) : randomIntBetween(101, Integer.MAX_VALUE), + percentileType + ); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(numberWithType(0, fieldType), fieldType, "field"), + new TestCaseSupplier.TypedData(percentile, percentileType, "percentile") + ), + evaluatorString(fieldType, percentileType), + fieldType, + nullValue() + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: Percentile parameter must be " + + "a number between 0 and 100, found [" + + percentile.doubleValue() + + "]" + ); + } + ) + ); + } + } + + return parameterSuppliersFromTypedDataWithDefaultChecks( + (nullPosition, nullValueDataType, original) -> nullValueDataType == DataType.NULL && nullPosition == 0 + ? 
DataType.NULL + : original.expectedType(), + (nullPosition, nullData, original) -> original, + cases, + (v, p) -> "numeric except unsigned_long" + ); + } + + @SuppressWarnings("unchecked") + private static TestCaseSupplier makeSupplier( + TestCaseSupplier.TypedDataSupplier fieldSupplier, + TestCaseSupplier.TypedDataSupplier percentileSupplier + ) { + return new TestCaseSupplier( + "field: " + fieldSupplier.name() + ", percentile: " + percentileSupplier.name(), + List.of(fieldSupplier.type(), percentileSupplier.type()), + () -> { + var fieldTypedData = fieldSupplier.get(); + var percentileTypedData = percentileSupplier.get(); + + var values = (List) fieldTypedData.data(); + var percentile = ((Number) percentileTypedData.data()).doubleValue(); + + var expected = calculatePercentile(values, percentile); + + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData, percentileTypedData), + evaluatorString(fieldSupplier.type(), percentileSupplier.type()), + fieldSupplier.type(), + expected instanceof Double expectedDouble + ? closeTo(expectedDouble, Math.abs(expectedDouble * 0.0000001)) + : equalTo(expected) + ); + } + ); + } + + private static Number calculatePercentile(List rawValues, double percentile) { + if (rawValues.isEmpty() || percentile < 0 || percentile > 100) { + return null; + } + + if (rawValues.size() == 1) { + return rawValues.get(0); + } + + int valueCount = rawValues.size(); + var p = percentile / 100.0; + var index = p * (valueCount - 1); + var lowerIndex = (int) index; + var upperIndex = lowerIndex + 1; + var fraction = index - lowerIndex; + + if (rawValues.get(0) instanceof Integer) { + var values = rawValues.stream().mapToInt(Number::intValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + var difference = (long) values[upperIndex] - values[lowerIndex]; + return values[lowerIndex] + (int) (fraction * difference); + } + } + + if (rawValues.get(0) instanceof Long) { + var values = rawValues.stream().mapToLong(Number::longValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).longValue(); + } + } + + if (rawValues.get(0) instanceof Double) { + var values = rawValues.stream().mapToDouble(Number::doubleValue).sorted().toArray(); + + if (percentile == 0) { + return values[0]; + } else if (percentile == 100) { + return values[valueCount - 1]; + } else { + assert lowerIndex >= 0 && upperIndex < valueCount; + return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).doubleValue(); + } + } + + throw new IllegalArgumentException("Unsupported type: " + rawValues.get(0).getClass()); + } + + private static BigDecimal calculatePercentile(double fraction, BigDecimal lowerValue, BigDecimal upperValue) { + return lowerValue.add(new BigDecimal(fraction).multiply(upperValue.subtract(lowerValue))); + } + + private static TestCaseSupplier.TypedData percentileWithType(Number value, DataType type) { + return new TestCaseSupplier.TypedData(numberWithType(value, type), type, "percentile"); + } + + private static Number numberWithType(Number value, DataType type) { + return switch (type) { + case INTEGER -> value.intValue(); + case LONG 
-> value.longValue(); + default -> value.doubleValue(); + }; + } + + private static String evaluatorString(DataType fieldDataType, DataType percentileDataType) { + var fieldTypeName = StringUtils.underscoreToLowerCamelCase(fieldDataType.name()); + + fieldTypeName = fieldTypeName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldTypeName.substring(1); + + var percentileEvaluator = TestCaseSupplier.castToDoubleEvaluator("Attribute[channel=1]", percentileDataType); + + return "MvPercentile" + fieldTypeName + "Evaluator[values=Attribute[channel=0], percentile=" + percentileEvaluator + "]"; + } + + @Override + protected final Expression build(Source source, List args) { + return new MvPercentile(source, args.get(0), args.get(1)); + } +} From 0dab4b0571c263aa786234c42395390d62bf89cd Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Tue, 20 Aug 2024 08:22:22 -0600 Subject: [PATCH 098/389] (Doc+) Removing "current_node" from Allocation Explain API under Fix Watermark Errors (#111946) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy, team! This just simplifies the Allocation Explain API request to not need to include the `current_node` which may not be known when troubleshooting the [Fix Watermark Errors](https://www.elastic.co/guide/en/elasticsearch/reference/current/fix-watermark-errors.html) guide. TIA! Stef --- .../common-issues/disk-usage-exceeded.asciidoc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc index 728d805db7a30..7eb27d5428956 100644 --- a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc +++ b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc @@ -44,13 +44,11 @@ GET _cluster/allocation/explain { "index": "my-index", "shard": 0, - "primary": false, - "current_node": "my-node" + "primary": false } ---- // TEST[s/^/PUT my-index\n/] // TEST[s/"primary": false,/"primary": false/] -// TEST[s/"current_node": "my-node"//] [[fix-watermark-errors-temporary]] ==== Temporary Relief From 30408ce9145ba8325ab3726a347ff1eb0b468de3 Mon Sep 17 00:00:00 2001 From: Siddharth Rayabharam Date: Tue, 20 Aug 2024 11:58:43 -0400 Subject: [PATCH 099/389] Disable tests if adaptive allocation feature flag is disabled (#111942) --- .../xpack/inference/integration/ModelRegistryIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index 5157683f2dce9..d776f3963c2ca 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import 
org.elasticsearch.xpack.inference.services.elser.ElserInternalModel; @@ -101,6 +102,7 @@ public void testStoreModelWithUnknownFields() throws Exception { } public void testGetModel() throws Exception { + assumeTrue("Only if 'inference_adaptive_allocations' feature flag is enabled", AdaptiveAllocationsFeatureFlag.isEnabled()); String inferenceEntityId = "test-get-model"; Model model = buildElserModelConfig(inferenceEntityId, TaskType.SPARSE_EMBEDDING); AtomicReference putModelHolder = new AtomicReference<>(); From d8e705d5da0db38b0cfc488f503eec4728a2e30f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 20 Aug 2024 11:59:13 -0400 Subject: [PATCH 100/389] ESQL: Document `date` instead of `datetime` (#111985) This changes the generated types tables in the docs to say `date` instead of `datetime`. That's the name of the field in Elasticsearch so it's a lot less confusing to call it that. Closes #111650 --- .../description/to_datetime.asciidoc | 2 +- .../esql/functions/kibana/definition/add.json | 26 +-- .../functions/kibana/definition/bucket.json | 150 +++++++++--------- .../functions/kibana/definition/case.json | 4 +- .../functions/kibana/definition/coalesce.json | 6 +- .../functions/kibana/definition/count.json | 2 +- .../kibana/definition/count_distinct.json | 8 +- .../kibana/definition/date_diff.json | 8 +- .../kibana/definition/date_extract.json | 4 +- .../kibana/definition/date_format.json | 4 +- .../kibana/definition/date_parse.json | 8 +- .../kibana/definition/date_trunc.json | 8 +- .../functions/kibana/definition/equals.json | 4 +- .../kibana/definition/greater_than.json | 4 +- .../definition/greater_than_or_equal.json | 4 +- .../kibana/definition/less_than.json | 4 +- .../kibana/definition/less_than_or_equal.json | 4 +- .../esql/functions/kibana/definition/max.json | 4 +- .../esql/functions/kibana/definition/min.json | 4 +- .../kibana/definition/mv_append.json | 6 +- .../functions/kibana/definition/mv_count.json | 2 +- .../kibana/definition/mv_dedupe.json | 4 +- .../functions/kibana/definition/mv_first.json | 4 +- .../functions/kibana/definition/mv_last.json | 4 +- .../functions/kibana/definition/mv_max.json | 4 +- .../functions/kibana/definition/mv_min.json | 4 +- .../functions/kibana/definition/mv_slice.json | 4 +- .../functions/kibana/definition/mv_sort.json | 4 +- .../kibana/definition/not_equals.json | 4 +- .../esql/functions/kibana/definition/now.json | 2 +- .../esql/functions/kibana/definition/sub.json | 16 +- .../kibana/definition/to_datetime.json | 18 +-- .../kibana/definition/to_double.json | 2 +- .../kibana/definition/to_integer.json | 2 +- .../functions/kibana/definition/to_long.json | 2 +- .../kibana/definition/to_string.json | 2 +- .../kibana/definition/to_unsigned_long.json | 2 +- .../esql/functions/kibana/definition/top.json | 4 +- .../functions/kibana/definition/values.json | 4 +- .../esql/functions/kibana/docs/to_datetime.md | 2 +- .../esql/functions/parameters/bucket.asciidoc | 2 +- .../esql/functions/types/add.asciidoc | 8 +- .../esql/functions/types/bucket.asciidoc | 22 +-- .../esql/functions/types/case.asciidoc | 2 +- .../esql/functions/types/coalesce.asciidoc | 2 +- .../esql/functions/types/count.asciidoc | 2 +- .../functions/types/count_distinct.asciidoc | 8 +- .../esql/functions/types/date_diff.asciidoc | 4 +- .../functions/types/date_extract.asciidoc | 4 +- .../esql/functions/types/date_format.asciidoc | 4 +- .../esql/functions/types/date_parse.asciidoc | 8 +- .../esql/functions/types/date_trunc.asciidoc | 4 +- .../esql/functions/types/equals.asciidoc | 
2 +- .../functions/types/greater_than.asciidoc | 2 +- .../types/greater_than_or_equal.asciidoc | 2 +- .../esql/functions/types/less_than.asciidoc | 2 +- .../types/less_than_or_equal.asciidoc | 2 +- .../esql/functions/types/max.asciidoc | 2 +- .../esql/functions/types/min.asciidoc | 2 +- .../esql/functions/types/mv_append.asciidoc | 2 +- .../esql/functions/types/mv_count.asciidoc | 2 +- .../esql/functions/types/mv_dedupe.asciidoc | 2 +- .../esql/functions/types/mv_first.asciidoc | 2 +- .../esql/functions/types/mv_last.asciidoc | 2 +- .../esql/functions/types/mv_max.asciidoc | 2 +- .../esql/functions/types/mv_min.asciidoc | 2 +- .../esql/functions/types/mv_slice.asciidoc | 2 +- .../esql/functions/types/mv_sort.asciidoc | 2 +- .../esql/functions/types/not_equals.asciidoc | 2 +- .../esql/functions/types/now.asciidoc | 2 +- .../esql/functions/types/sub.asciidoc | 4 +- .../esql/functions/types/to_datetime.asciidoc | 14 +- .../esql/functions/types/to_double.asciidoc | 2 +- .../esql/functions/types/to_integer.asciidoc | 2 +- .../esql/functions/types/to_long.asciidoc | 2 +- .../esql/functions/types/to_string.asciidoc | 2 +- .../functions/types/to_unsigned_long.asciidoc | 2 +- .../esql/functions/types/top.asciidoc | 2 +- .../esql/functions/types/values.asciidoc | 2 +- .../xpack/esql/core/type/DataType.java | 8 + .../function/scalar/convert/ToDatetime.java | 2 +- .../function/AbstractFunctionTestCase.java | 22 ++- 82 files changed, 265 insertions(+), 259 deletions(-) diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc index ee6866da9ee34..91cbfa0b5fe1e 100644 --- a/docs/reference/esql/functions/description/to_datetime.asciidoc +++ b/docs/reference/esql/functions/description/to_datetime.asciidoc @@ -4,4 +4,4 @@ Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. -NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date istruncated, not rounded. +NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/kibana/definition/add.json b/docs/reference/esql/functions/kibana/definition/add.json index e20299821facb..0932a76966560 100644 --- a/docs/reference/esql/functions/kibana/definition/add.json +++ b/docs/reference/esql/functions/kibana/definition/add.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, @@ -20,61 +20,61 @@ } ], "variadic" : false, - "returnType" : "date_period" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "datetime", + "type" : "time_duration", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." 
}, { "name" : "rhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "time_duration", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date_period" }, { "params" : [ @@ -248,13 +248,13 @@ }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 14bd74c1c20f3..94214a3a4f047 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -16,17 +16,17 @@ "name" : "buckets", "type" : "date_period", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -34,29 +34,29 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -64,11 +64,11 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, @@ -80,13 +80,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -94,11 +94,11 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." 
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, @@ -110,13 +110,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -124,7 +124,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -134,19 +134,19 @@ }, { "name" : "to", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -154,7 +154,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -170,13 +170,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -184,7 +184,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -200,13 +200,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -214,7 +214,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -224,19 +224,19 @@ }, { "name" : "to", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -244,7 +244,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
}, { "name" : "from", @@ -260,13 +260,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -274,7 +274,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -290,13 +290,13 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -304,11 +304,11 @@ "name" : "buckets", "type" : "time_duration", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -322,7 +322,7 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -340,7 +340,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -358,7 +358,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -388,7 +388,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -418,7 +418,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -448,7 +448,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -478,7 +478,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -508,7 +508,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -538,7 +538,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
}, { "name" : "from", @@ -568,7 +568,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -598,7 +598,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -628,7 +628,7 @@ "name" : "buckets", "type" : "long", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -646,7 +646,7 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -664,7 +664,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -682,7 +682,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -712,7 +712,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -742,7 +742,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -772,7 +772,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -802,7 +802,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -832,7 +832,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -862,7 +862,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -892,7 +892,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -922,7 +922,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." 
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -952,7 +952,7 @@ "name" : "buckets", "type" : "long", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -970,7 +970,7 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -988,7 +988,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -1006,7 +1006,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1036,7 +1036,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1066,7 +1066,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1096,7 +1096,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1126,7 +1126,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1156,7 +1156,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1186,7 +1186,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1216,7 +1216,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1246,7 +1246,7 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", @@ -1276,7 +1276,7 @@ "name" : "buckets", "type" : "long", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
} ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 5959eed62d37b..27705cd3897f9 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -50,13 +50,13 @@ }, { "name" : "trueValue", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index f00f471e63ecc..2459a4d51bb2d 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -74,19 +74,19 @@ "params" : [ { "name" : "first", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Expression to evaluate." }, { "name" : "rest", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Other expression to evaluate." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/count.json b/docs/reference/esql/functions/kibana/definition/count.json index e05ebc6789816..2a15fb3bdd335 100644 --- a/docs/reference/esql/functions/kibana/definition/count.json +++ b/docs/reference/esql/functions/kibana/definition/count.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." } diff --git a/docs/reference/esql/functions/kibana/definition/count_distinct.json b/docs/reference/esql/functions/kibana/definition/count_distinct.json index 801bd26f7d022..f6a148783ba42 100644 --- a/docs/reference/esql/functions/kibana/definition/count_distinct.json +++ b/docs/reference/esql/functions/kibana/definition/count_distinct.json @@ -74,7 +74,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." } @@ -86,7 +86,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." }, @@ -104,7 +104,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." }, @@ -122,7 +122,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Column or literal for which to count the number of distinct values." 
}, diff --git a/docs/reference/esql/functions/kibana/definition/date_diff.json b/docs/reference/esql/functions/kibana/definition/date_diff.json index 7995d3c6d32b6..d6589f041075d 100644 --- a/docs/reference/esql/functions/kibana/definition/date_diff.json +++ b/docs/reference/esql/functions/kibana/definition/date_diff.json @@ -14,13 +14,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } @@ -38,13 +38,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } diff --git a/docs/reference/esql/functions/kibana/definition/date_extract.json b/docs/reference/esql/functions/kibana/definition/date_extract.json index 75cedcc191b50..557f0e0a47e54 100644 --- a/docs/reference/esql/functions/kibana/definition/date_extract.json +++ b/docs/reference/esql/functions/kibana/definition/date_extract.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json index 5e8587c046d70..7bd01d7f4ef31 100644 --- a/docs/reference/esql/functions/kibana/definition/date_format.json +++ b/docs/reference/esql/functions/kibana/definition/date_format.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." 
} diff --git a/docs/reference/esql/functions/kibana/definition/date_parse.json b/docs/reference/esql/functions/kibana/definition/date_parse.json index 890179143bef8..9400340750c2a 100644 --- a/docs/reference/esql/functions/kibana/definition/date_parse.json +++ b/docs/reference/esql/functions/kibana/definition/date_parse.json @@ -20,7 +20,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -56,7 +56,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -74,7 +74,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/date_trunc.json b/docs/reference/esql/functions/kibana/definition/date_trunc.json index 3d8658c496529..bd3f362d1670b 100644 --- a/docs/reference/esql/functions/kibana/definition/date_trunc.json +++ b/docs/reference/esql/functions/kibana/definition/date_trunc.json @@ -14,13 +14,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -32,13 +32,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json index 8d0525ac3e91e..eca80ccdbf657 100644 --- a/docs/reference/esql/functions/kibana/definition/equals.json +++ b/docs/reference/esql/functions/kibana/definition/equals.json @@ -63,13 +63,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json index 9083e114bfe9d..7831b0f41cd9d 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json index 75888ab25399f..b6a40a838c393 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json index 30c6c9eab0442..bf6b9c5c08774 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than.json +++ b/docs/reference/esql/functions/kibana/definition/less_than.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json index 64f9c463748d1..4e57161887141 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json @@ -9,13 +9,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 725b42763816d..b13d367d37345 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 68dfdd6cfd8c0..338ed10d67b2e 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index 8ee4e7297cc3a..3365226141f8f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -62,19 +62,19 @@ "params" : [ { "name" : "field1", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" }, { "name" : "field2", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_count.json b/docs/reference/esql/functions/kibana/definition/mv_count.json index d414e5b957495..f125327314f4e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_count.json +++ b/docs/reference/esql/functions/kibana/definition/mv_count.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 7ab287bc94d34..7d66e3dcc0b9b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -45,13 +45,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index e3141e800e4ad..de6e642068517 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index e55d66dbf8b93..ea1293e7acfec 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 0783f6d6d5cbc..eb25369f78f77 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index cc23df386356e..87ad94338492e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 30d0e1179dc89..ff52467b7d84a 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -80,7 +80,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." 
}, @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index 28b4c9e8d6fea..d2bbd2c0fdbf4 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -26,7 +26,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." }, @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/not_equals.json b/docs/reference/esql/functions/kibana/definition/not_equals.json index 41863f7496a25..4b4d22a5abef4 100644 --- a/docs/reference/esql/functions/kibana/definition/not_equals.json +++ b/docs/reference/esql/functions/kibana/definition/not_equals.json @@ -63,13 +63,13 @@ "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." }, { "name" : "rhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "An expression." } diff --git a/docs/reference/esql/functions/kibana/definition/now.json b/docs/reference/esql/functions/kibana/definition/now.json index 9cdb4945afa2e..1a2fc3a1dc42a 100644 --- a/docs/reference/esql/functions/kibana/definition/now.json +++ b/docs/reference/esql/functions/kibana/definition/now.json @@ -6,7 +6,7 @@ "signatures" : [ { "params" : [ ], - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/sub.json b/docs/reference/esql/functions/kibana/definition/sub.json index 413b0e73f89d0..37e3852865e7f 100644 --- a/docs/reference/esql/functions/kibana/definition/sub.json +++ b/docs/reference/esql/functions/kibana/definition/sub.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "lhs", - "type" : "date_period", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, @@ -20,43 +20,43 @@ } ], "variadic" : false, - "returnType" : "date_period" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "date_period", + "type" : "time_duration", "optional" : false, "description" : "A numeric value or a date time value." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "lhs", - "type" : "datetime", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." }, { "name" : "rhs", - "type" : "time_duration", + "type" : "date_period", "optional" : false, "description" : "A numeric value or a date time value." 
} ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date_period" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 778d151c40151..032e8e1cbda34 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -3,19 +3,19 @@ "type" : "eval", "name" : "to_datetime", "description" : "Converts an input value to a date value.\nA string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.\nTo convert dates in other formats, use <>.", - "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date istruncated, not rounded.", + "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.", "signatures" : [ { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -27,7 +27,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -39,7 +39,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -51,7 +51,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -63,7 +63,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -75,7 +75,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -87,7 +87,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_double.json b/docs/reference/esql/functions/kibana/definition/to_double.json index f4e414068db61..ae7e4832bfb3c 100644 --- a/docs/reference/esql/functions/kibana/definition/to_double.json +++ b/docs/reference/esql/functions/kibana/definition/to_double.json @@ -56,7 +56,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_integer.json b/docs/reference/esql/functions/kibana/definition/to_integer.json index 2776d8b29c412..5150d12936711 100644 --- a/docs/reference/esql/functions/kibana/definition/to_integer.json +++ b/docs/reference/esql/functions/kibana/definition/to_integer.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_long.json b/docs/reference/esql/functions/kibana/definition/to_long.json index e3218eba9642a..5fd4bce34e7e0 100644 --- a/docs/reference/esql/functions/kibana/definition/to_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_long.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. 
The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_string.json b/docs/reference/esql/functions/kibana/definition/to_string.json index ef03cc06ea636..ea94171834908 100644 --- a/docs/reference/esql/functions/kibana/definition/to_string.json +++ b/docs/reference/esql/functions/kibana/definition/to_string.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json index d9cba641573fb..5521241224d61 100644 --- a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json @@ -20,7 +20,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 4db3aed40a88d..c688bf5ea77c8 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The field to collect the top values for." }, @@ -50,7 +50,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index 3e0036c4d25b6..d9f37cd1ac83d 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/to_datetime.md b/docs/reference/esql/functions/kibana/docs/to_datetime.md index 613381615421a..c194dfd17871a 100644 --- a/docs/reference/esql/functions/kibana/docs/to_datetime.md +++ b/docs/reference/esql/functions/kibana/docs/to_datetime.md @@ -11,4 +11,4 @@ To convert dates in other formats, use <>. ROW string = ["1953-09-02T00:00:00.000Z", "1964-06-02T00:00:00.000Z", "1964-06-02 00:00:00"] | EVAL datetime = TO_DATETIME(string) ``` -Note: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date istruncated, not rounded. +Note: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/parameters/bucket.asciidoc b/docs/reference/esql/functions/parameters/bucket.asciidoc index 342ea560aaa0b..09c720d6095f3 100644 --- a/docs/reference/esql/functions/parameters/bucket.asciidoc +++ b/docs/reference/esql/functions/parameters/bucket.asciidoc @@ -6,7 +6,7 @@ Numeric or date expression from which to derive buckets. `buckets`:: -Target number of buckets. 
+Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted. `from`:: Start of the range. Can be a number, a date or a date expressed as a string. diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index a0215a803d4e3..54d1aec463c1a 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -5,10 +5,10 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date +date_period | date | date date_period | date_period | date_period -date_period | datetime | datetime -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double @@ -18,7 +18,7 @@ integer | long | long long | double | double long | integer | long long | long | long -time_duration | datetime | datetime +time_duration | date | date time_duration | time_duration | time_duration unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/bucket.asciidoc b/docs/reference/esql/functions/types/bucket.asciidoc index 1cbfad14ca379..172e84b6f7860 100644 --- a/docs/reference/esql/functions/types/bucket.asciidoc +++ b/docs/reference/esql/functions/types/bucket.asciidoc @@ -5,17 +5,17 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | buckets | from | to | result -datetime | date_period | | | datetime -datetime | integer | datetime | datetime | datetime -datetime | integer | datetime | keyword | datetime -datetime | integer | datetime | text | datetime -datetime | integer | keyword | datetime | datetime -datetime | integer | keyword | keyword | datetime -datetime | integer | keyword | text | datetime -datetime | integer | text | datetime | datetime -datetime | integer | text | keyword | datetime -datetime | integer | text | text | datetime -datetime | time_duration | | | datetime +date | date_period | | | date +date | integer | date | date | date +date | integer | date | keyword | date +date | integer | date | text | date +date | integer | keyword | date | date +date | integer | keyword | keyword | date +date | integer | keyword | text | date +date | integer | text | date | date +date | integer | text | keyword | date +date | integer | text | text | date +date | time_duration | | | date double | double | | | double double | integer | double | double | double double | integer | double | integer | double diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index 85e4193b5bf2f..f6c8cfe9361d1 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -7,7 +7,7 @@ condition | trueValue | result boolean | boolean | boolean boolean | cartesian_point | cartesian_point -boolean | datetime | datetime +boolean | date | date boolean | double | double boolean | geo_point | geo_point boolean | integer | integer diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 841d836f6837e..368a12db0dca4 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -9,7 +9,7 @@ boolean | boolean | boolean boolean | | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | 
datetime | datetime +date | date | date geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape integer | integer | integer diff --git a/docs/reference/esql/functions/types/count.asciidoc b/docs/reference/esql/functions/types/count.asciidoc index 70e79d4899605..959c94c1ec358 100644 --- a/docs/reference/esql/functions/types/count.asciidoc +++ b/docs/reference/esql/functions/types/count.asciidoc @@ -7,7 +7,7 @@ field | result boolean | long cartesian_point | long -datetime | long +date | long double | long geo_point | long integer | long diff --git a/docs/reference/esql/functions/types/count_distinct.asciidoc b/docs/reference/esql/functions/types/count_distinct.asciidoc index 4b201d45732f1..c365c8814573c 100644 --- a/docs/reference/esql/functions/types/count_distinct.asciidoc +++ b/docs/reference/esql/functions/types/count_distinct.asciidoc @@ -9,10 +9,10 @@ boolean | integer | long boolean | long | long boolean | unsigned_long | long boolean | | long -datetime | integer | long -datetime | long | long -datetime | unsigned_long | long -datetime | | long +date | integer | long +date | long | long +date | unsigned_long | long +date | | long double | integer | long double | long | long double | unsigned_long | long diff --git a/docs/reference/esql/functions/types/date_diff.asciidoc b/docs/reference/esql/functions/types/date_diff.asciidoc index 98adcef51e75c..b0a4818f412ac 100644 --- a/docs/reference/esql/functions/types/date_diff.asciidoc +++ b/docs/reference/esql/functions/types/date_diff.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== unit | startTimestamp | endTimestamp | result -keyword | datetime | datetime | integer -text | datetime | datetime | integer +keyword | date | date | integer +text | date | date | integer |=== diff --git a/docs/reference/esql/functions/types/date_extract.asciidoc b/docs/reference/esql/functions/types/date_extract.asciidoc index 43702ef0671a7..ec9bf70c221cc 100644 --- a/docs/reference/esql/functions/types/date_extract.asciidoc +++ b/docs/reference/esql/functions/types/date_extract.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePart | date | result -keyword | datetime | long -text | datetime | long +keyword | date | long +text | date | long |=== diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc index a76f38653b9b8..b2e97dfa8835a 100644 --- a/docs/reference/esql/functions/types/date_format.asciidoc +++ b/docs/reference/esql/functions/types/date_format.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== dateFormat | date | result -keyword | datetime | keyword -text | datetime | keyword +keyword | date | keyword +text | date | keyword |=== diff --git a/docs/reference/esql/functions/types/date_parse.asciidoc b/docs/reference/esql/functions/types/date_parse.asciidoc index 314d02eb06271..f3eab18309dd8 100644 --- a/docs/reference/esql/functions/types/date_parse.asciidoc +++ b/docs/reference/esql/functions/types/date_parse.asciidoc @@ -5,8 +5,8 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePattern | dateString | result -keyword | keyword | datetime -keyword | text | datetime -text | keyword | datetime -text | text | datetime +keyword | keyword | date +keyword | text | date +text | keyword | date +text | text | date |=== diff --git a/docs/reference/esql/functions/types/date_trunc.asciidoc b/docs/reference/esql/functions/types/date_trunc.asciidoc index 
8df45cfef54a8..aa7dee99c6c44 100644 --- a/docs/reference/esql/functions/types/date_trunc.asciidoc +++ b/docs/reference/esql/functions/types/date_trunc.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== interval | date | result -date_period | datetime | datetime -time_duration | datetime | datetime +date_period | date | date +time_duration | date | date |=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- a/docs/reference/esql/functions/types/equals.asciidoc +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than.asciidoc +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than.asciidoc +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 705745d76dbab..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/min.asciidoc 
b/docs/reference/esql/functions/types/min.asciidoc index 705745d76dbab..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index 49dcef6dc8860..a1894e429ae82 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -8,7 +8,7 @@ field1 | field2 | result boolean | boolean | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | datetime | datetime +date | date | date double | double | double geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 8af6b76591acb..260c531731f04 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -8,7 +8,7 @@ field | result boolean | integer cartesian_point | integer cartesian_shape | integer -datetime | integer +date | integer double | integer geo_point | integer geo_shape | integer diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index a6b78f781f17a..68e546451c8cb 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ 
b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 568de10f53d32..0a9dc073370c7 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -8,7 +8,7 @@ field | start | end | result boolean | integer | integer | boolean cartesian_point | integer | integer | cartesian_point cartesian_shape | integer | integer | cartesian_shape -datetime | integer | integer | datetime +date | integer | integer | date double | integer | integer | double geo_point | integer | integer | geo_point geo_shape | integer | integer | geo_shape diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 24925ca8a6587..93965187482ac 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -6,7 +6,7 @@ |=== field | order | result boolean | keyword | boolean -datetime | keyword | datetime +date | keyword | date double | keyword | double integer | keyword | integer ip | keyword | ip diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- a/docs/reference/esql/functions/types/not_equals.asciidoc +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/now.asciidoc b/docs/reference/esql/functions/types/now.asciidoc index 5737d98f2f7db..b474ab1042050 100644 --- a/docs/reference/esql/functions/types/now.asciidoc +++ b/docs/reference/esql/functions/types/now.asciidoc @@ -5,5 +5,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== result -datetime +date |=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc index d309f651705f0..c3ded301ebe68 100644 --- a/docs/reference/esql/functions/types/sub.asciidoc +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -5,9 +5,9 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date date_period | date_period | date_period -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double diff --git a/docs/reference/esql/functions/types/to_datetime.asciidoc b/docs/reference/esql/functions/types/to_datetime.asciidoc index 52c4cebb661cf..80c986efca794 100644 --- a/docs/reference/esql/functions/types/to_datetime.asciidoc +++ b/docs/reference/esql/functions/types/to_datetime.asciidoc @@ -5,11 +5,11 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | result -datetime | datetime -double | datetime -integer | datetime -keyword | datetime -long | datetime -text | datetime -unsigned_long | datetime +date | date +double | date +integer | date +keyword | date +long | date +text | date +unsigned_long | date |=== diff --git 
a/docs/reference/esql/functions/types/to_double.asciidoc b/docs/reference/esql/functions/types/to_double.asciidoc index cff686c7bc4ca..d5f5833cd7249 100644 --- a/docs/reference/esql/functions/types/to_double.asciidoc +++ b/docs/reference/esql/functions/types/to_double.asciidoc @@ -9,7 +9,7 @@ boolean | double counter_double | double counter_integer | double counter_long | double -datetime | double +date | double double | double integer | double keyword | double diff --git a/docs/reference/esql/functions/types/to_integer.asciidoc b/docs/reference/esql/functions/types/to_integer.asciidoc index 974f3c9c82d88..d67f8f07affd9 100644 --- a/docs/reference/esql/functions/types/to_integer.asciidoc +++ b/docs/reference/esql/functions/types/to_integer.asciidoc @@ -7,7 +7,7 @@ field | result boolean | integer counter_integer | integer -datetime | integer +date | integer double | integer integer | integer keyword | integer diff --git a/docs/reference/esql/functions/types/to_long.asciidoc b/docs/reference/esql/functions/types/to_long.asciidoc index b3959c5444e34..a07990cb1cfbf 100644 --- a/docs/reference/esql/functions/types/to_long.asciidoc +++ b/docs/reference/esql/functions/types/to_long.asciidoc @@ -8,7 +8,7 @@ field | result boolean | long counter_integer | long counter_long | long -datetime | long +date | long double | long integer | long keyword | long diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index f14cfbb39929f..26a5b31a2a589 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -8,7 +8,7 @@ field | result boolean | keyword cartesian_point | keyword cartesian_shape | keyword -datetime | keyword +date | keyword double | keyword geo_point | keyword geo_shape | keyword diff --git a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc index a271e1a19321d..87b21f3948dad 100644 --- a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc +++ b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | unsigned_long -datetime | unsigned_long +date | unsigned_long double | unsigned_long integer | unsigned_long keyword | unsigned_long diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index ff71b2d153e3a..0eb329c10b9ed 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -6,7 +6,7 @@ |=== field | limit | order | result boolean | integer | keyword | boolean -datetime | integer | keyword | datetime +date | integer | keyword | date double | integer | keyword | double integer | integer | keyword | integer ip | integer | keyword | ip diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 705745d76dbab..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 065ada06bfa1e..979368c300e00 100644 --- 
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java
@@ -447,6 +447,14 @@ public String esType() {
         return esType;
     }
 
+    /**
+     * Return the Elasticsearch field name of this type if there is one,
+     * otherwise return the ESQL specific name.
+     */
+    public String esNameIfPossible() {
+        return esType != null ? esType : typeName;
+    }
+
     /**
      * The name we give to types on the response.
      */
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java
index 2c86dfbac12ce..c66ba7f87a1c5 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java
@@ -58,7 +58,7 @@ public class ToDatetime extends AbstractConvertFunction {
         Converts an input value to a date value.
         A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.
         To convert dates in other formats, use <>.""",
-        note = "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is"
+        note = "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is "
            + "truncated, not rounded.",
        examples = {
            @Example(file = "date", tag = "to_datetime-str", explanation = """
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index cece2badb2955..efb078cbe80e0 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -88,7 +88,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
@@ -708,13 +707,12 @@ public static void testFunctionInfo() {
         for (int i = 0; i < args.size(); i++) {
             typesFromSignature.add(new HashSet<>());
         }
-        Function<DataType, String> typeName = dt -> dt.esType() != null ? dt.esType() : dt.typeName();
         for (Map.Entry<List<DataType>, DataType> entry : signatures().entrySet()) {
             List<DataType> types = entry.getKey();
             for (int i = 0; i < args.size() && i < types.size(); i++) {
-                typesFromSignature.get(i).add(typeName.apply(types.get(i)));
+                typesFromSignature.get(i).add(types.get(i).esNameIfPossible());
             }
-            returnFromSignature.add(typeName.apply(entry.getValue()));
+            returnFromSignature.add(entry.getValue().esNameIfPossible());
         }
 
         for (int i = 0; i < args.size(); i++) {
@@ -871,15 +869,15 @@ private static void renderTypes(List<String> argNames) throws IOException {
             }
             StringBuilder b = new StringBuilder();
             for (DataType arg : sig.getKey()) {
-                b.append(arg.typeName()).append(" | ");
+                b.append(arg.esNameIfPossible()).append(" | ");
             }
             b.append("| ".repeat(argNames.size() - sig.getKey().size()));
-            b.append(sig.getValue().typeName());
+            b.append(sig.getValue().esNameIfPossible());
             table.add(b.toString());
         }
         Collections.sort(table);
         if (table.isEmpty()) {
-            table.add(signatures.values().iterator().next().typeName());
+            table.add(signatures.values().iterator().next().esNameIfPossible());
         }
         String rendered = DOCS_WARNING + """
@@ -1085,7 +1083,7 @@ private static void renderKibanaFunctionDefinition(
             builder.startArray("params");
             builder.endArray();
             // There should only be one return type so just use that as the example
-            builder.field("returnType", signatures().values().iterator().next().typeName());
+            builder.field("returnType", signatures().values().iterator().next().esNameIfPossible());
             builder.endObject();
         } else {
             int minArgCount = (int) args.stream().filter(a -> false == a.optional()).count();
@@ -1106,14 +1104,14 @@ private static void renderKibanaFunctionDefinition(
                 EsqlFunctionRegistry.ArgSignature arg = args.get(i);
                 builder.startObject();
                 builder.field("name", arg.name());
-                builder.field("type", sig.getKey().get(i).typeName());
+                builder.field("type", sig.getKey().get(i).esNameIfPossible());
                 builder.field("optional", arg.optional());
                 builder.field("description", arg.description());
                 builder.endObject();
             }
             builder.endArray();
             builder.field("variadic", variadic);
-            builder.field("returnType", sig.getValue().typeName());
+            builder.field("returnType", sig.getValue().esNameIfPossible());
             builder.endObject();
         }
     }
@@ -1149,12 +1147,12 @@ public int compare(Map.Entry<List<DataType>, DataType> lhs, Map.Entry
Date: Tue, 20 Aug 2024 12:52:30 -0400
Subject: [PATCH 101/389] Unmute the test, it had already been fixed but wasn't unmuted (#111800)

---
 muted-tests.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index c2e0d48c31a20..b166fb137bc48 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -38,9 +38,6 @@ tests:
 - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT"
   issue: "https://github.com/elastic/elasticsearch/issues/108628"
   method: "testDeprecatedSettingsReturnWarnings"
-- class: "org.elasticsearch.xpack.inference.InferenceCrudIT"
-  issue: "https://github.com/elastic/elasticsearch/issues/109391"
-  method: "testDeleteEndpointWhileReferencedByPipeline"
 - class: "org.elasticsearch.xpack.test.rest.XPackRestIT"
   issue: "https://github.com/elastic/elasticsearch/issues/109687"
   method: "test {p0=sql/translate/Translate SQL}"

From a9fa443402a5e2ef86f577b6eea202eed9c8925c Mon Sep 17 00:00:00 2001
From: Volodymyr Krasnikov <129072588+volodk85@users.noreply.github.com>
Date: Tue, 20 Aug 2024 10:59:35 -0700
Subject: [PATCH 102/389] Avoid unsafe futures in SharedBlobCacheService (#111526)

---
 .../blobcache/shared/SharedBlobCacheService.java | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
index 8ca62a3b95023..584e551f1cf6b 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.action.support.RefCountingRunnable;
-import org.elasticsearch.action.support.UnsafePlainActionFuture;
 import org.elasticsearch.blobcache.BlobCacheMetrics;
 import org.elasticsearch.blobcache.BlobCacheUtils;
 import org.elasticsearch.blobcache.common.ByteRange;
@@ -39,7 +38,6 @@
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.monitor.fs.FsProbe;
 import org.elasticsearch.node.NodeRoleSettings;
-import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
@@ -1162,9 +1160,7 @@ private int readSingleRegion(
         RangeMissingHandler writer,
         int region
     ) throws InterruptedException, ExecutionException {
-        final PlainActionFuture<Integer> readFuture = new UnsafePlainActionFuture<>(
-            BlobStoreRepository.STATELESS_SHARD_PREWARMING_THREAD_NAME
-        );
+        final PlainActionFuture<Integer> readFuture = new PlainActionFuture<>();
         final CacheFileRegion fileRegion = get(cacheKey, length, region);
         final long regionStart = getRegionStart(region);
         fileRegion.populateAndRead(
@@ -1186,9 +1182,7 @@ private int readMultiRegions(
         int startRegion,
         int endRegion
     ) throws InterruptedException, ExecutionException {
-        final PlainActionFuture<Void> readsComplete = new UnsafePlainActionFuture<>(
-            BlobStoreRepository.STATELESS_SHARD_PREWARMING_THREAD_NAME
-        );
+        final PlainActionFuture<Void> readsComplete = new PlainActionFuture<>();
         final AtomicInteger bytesRead = new AtomicInteger();
         try (var listeners = new RefCountingListener(1, readsComplete)) {
             for (int region = startRegion; region <= endRegion; region++) {

From 0f8ce788ad8901a5d457de72ff9164c68860484e Mon Sep 17 00:00:00 2001
From: Pat Whelan
Date: Tue, 20 Aug 2024 14:34:44 -0400
Subject: [PATCH 103/389] [ML] Migrate Inference to ChunkedToXContent (#111655)

In preparation of streaming Inference responses, we are migrating
RestInferenceAction and corresponding result objects from ToXContent to
ChunkedToXContent. RestInferenceAction will now use the built in
ChunkedRestResponseBodyPart and send a single item before closing the
stream.
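To make the shape of the migration concrete, here is a minimal sketch of
a chunked result object. ExampleResults and its "values" field are made
up for this illustration and are not classes touched by this change;
only ChunkedToXContentObject, ChunkedToXContentHelper and Iterators are
the real utilities the migration relies on:

    import java.util.Iterator;
    import java.util.List;

    import org.elasticsearch.common.collect.Iterators;
    import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
    import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
    import org.elasticsearch.xcontent.ToXContent;

    // Illustrative stand-in, not one of the real inference result classes.
    record ExampleResults(List<String> values) implements ChunkedToXContentObject {
        @Override
        public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
            // Describe the response as a sequence of small fragments instead of
            // eagerly writing the whole object into one XContentBuilder; the
            // REST layer can then flush each fragment as its own chunk.
            return Iterators.concat(
                ChunkedToXContentHelper.startObject(),
                ChunkedToXContentHelper.startArray("values"),
                values.stream().map(v -> (ToXContent) (builder, p) -> builder.value(v)).iterator(),
                ChunkedToXContentHelper.endArray(),
                ChunkedToXContentHelper.endObject()
            );
        }
    }

Because toXContentChunked returns an iterator of fragments, a large
result never has to be fully materialized into a single buffer before
the first bytes go out on the wire.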
---
 docs/changelog/111655.yaml                        |  5 ++
 .../inference/InferenceServiceResults.java        |  4 +-
 .../inference/action/InferenceAction.java         | 20 +++---
 .../results/ChatCompletionResults.java            | 12 ++--
 .../results/ErrorChunkedInferenceResults.java     |  8 +--
 ...nferenceChunkedSparseEmbeddingResults.java     | 10 +--
 ...erenceChunkedTextEmbeddingByteResults.java     | 12 ++--
 ...renceChunkedTextEmbeddingFloatResults.java     | 10 +--
 .../InferenceTextEmbeddingByteResults.java        | 12 ++--
 .../InferenceTextEmbeddingFloatResults.java       | 12 ++--
 .../inference/results/RankedDocsResults.java      | 12 ++--
 .../results/SparseEmbeddingResults.java           | 14 ++--
 .../results/RankedDocsResultsTests.java           |  4 +-
 ...stractChunkedBWCSerializationTestCase.java     | 67 +++++++++++++++++++
 .../inference/rest/RestInferenceAction.java       |  4 +-
 15 files changed, 129 insertions(+), 77 deletions(-)
 create mode 100644 docs/changelog/111655.yaml
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java

diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml
new file mode 100644
index 0000000000000..077714d15a712
--- /dev/null
+++ b/docs/changelog/111655.yaml
@@ -0,0 +1,5 @@
+pr: 111655
+summary: Migrate Inference to `ChunkedToXContent`
+area: Machine Learning
+type: enhancement
+issues: []

diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
index 62166115820f5..f8330404c1538 100644
--- a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
+++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
@@ -9,12 +9,12 @@
 package org.elasticsearch.inference;
 
 import org.elasticsearch.common.io.stream.NamedWriteable;
-import org.elasticsearch.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ChunkedToXContent;
 
 import java.util.List;
 import java.util.Map;
 
-public interface InferenceServiceResults extends NamedWriteable, ToXContentFragment {
+public interface InferenceServiceResults extends NamedWriteable, ChunkedToXContent {
 
     /**
      * Transform the result to match the format required for the TransportCoordinatedInferenceAction.

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java
index 53e404b48dc2e..7ecb5aef4ce8d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java
@@ -14,8 +14,11 @@
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
+import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
+import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
@@ -24,8 +27,7 @@
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.ToXContentObject;
-import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults;
 import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults;
@@ -34,6 +36,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -318,7 +321,7 @@ public String toString() {
         }
     }
 
-    public static class Response extends ActionResponse implements ToXContentObject {
+    public static class Response extends ActionResponse implements ChunkedToXContentObject {
 
         private final InferenceServiceResults results;
 
@@ -398,11 +401,12 @@ public void writeTo(StreamOutput out) throws IOException {
         }
 
         @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-            builder.startObject();
-            results.toXContent(builder, params);
-            builder.endObject();
-            return builder;
+        public Iterator toXContentChunked(ToXContent.Params params) {
+            return Iterators.concat(
+                ChunkedToXContentHelper.startObject(),
+                results.toXContentChunked(params),
+                ChunkedToXContentHelper.endObject()
+            );
         }
 
         @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java
index bbd4d026f0d55..902c69cef558e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ChatCompletionResults.java
@@ -10,12 +10,15 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
 import org.elasticsearch.inference.TaskType;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
@@ -46,13 +49,8 @@ public ChatCompletionResults(StreamInput in) throws IOException {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(COMPLETION);
-        for (Result result : results) {
-            result.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(COMPLETION, results.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java
index 376b8763a5eb9..18f88a8ff022a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ErrorChunkedInferenceResults.java
@@ -11,10 +11,11 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.ChunkedInferenceServiceResults;
 import org.elasticsearch.inference.InferenceResults;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContent;
-import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -89,9 +90,8 @@ public String toString() {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(NAME, exception.getMessage());
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.field(NAME, exception.getMessage());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java
index f1265873ad6dd..187b186fcd91d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedSparseEmbeddingResults.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.ChunkedInferenceServiceResults;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.xcontent.ToXContent;
@@ -77,13 +78,8 @@ public List getChunkedResults() {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(FIELD_NAME);
-        for (MlChunkedTextExpansionResults.ChunkedResult chunk : chunkedResults) {
-            chunk.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(FIELD_NAME, chunkedResults.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java
index b78bce8c5c2cd..cc245c40c51e3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingByteResults.java
@@ -12,8 +12,10 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.ChunkedInferenceServiceResults;
 import org.elasticsearch.inference.InferenceResults;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContent;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -61,14 +63,8 @@ public InferenceChunkedTextEmbeddingByteResults(StreamInput in) throws IOExcepti
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        // TODO add isTruncated flag
-        builder.startArray(FIELD_NAME);
-        for (var embedding : chunks) {
-            embedding.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(FIELD_NAME, chunks.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java
index 9fead334dcbc0..4b4d77cd3f043 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceChunkedTextEmbeddingFloatResults.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.ChunkedInferenceServiceResults;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.xcontent.ToXContent;
@@ -74,14 +75,9 @@ public static InferenceChunkedTextEmbeddingFloatResults ofMlResults(MlChunkedTex
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+    public Iterator toXContentChunked(ToXContent.Params params) {
         // TODO add isTruncated flag
-        builder.startArray(FIELD_NAME);
-        for (var embedding : chunks) {
-            embedding.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+        return ChunkedToXContentHelper.array(FIELD_NAME, chunks.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java
index 8d94083bf3241..16dca7b04d526 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingByteResults.java
@@ -13,8 +13,10 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults;
@@ -22,6 +24,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,13 +61,8 @@ public int getFirstEmbeddingSize() {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(TEXT_EMBEDDING_BYTES);
-        for (InferenceByteEmbedding embedding : embeddings) {
-            embedding.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(TEXT_EMBEDDING_BYTES, embeddings.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java
index 1822e3af28c2d..9f9bdfec7cfae 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/InferenceTextEmbeddingFloatResults.java
@@ -14,10 +14,12 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
 import org.elasticsearch.inference.TaskType;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults;
@@ -25,6 +27,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -99,13 +102,8 @@ public int getFirstEmbeddingSize() {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(TEXT_EMBEDDING);
-        for (InferenceFloatEmbedding embedding : embeddings) {
-            embedding.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(TEXT_EMBEDDING, embeddings.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java
index 9196a57c868ba..6ebf15bf34937 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java
@@ -11,17 +11,20 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
 import org.elasticsearch.inference.TaskType;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -172,13 +175,8 @@ public List getRankedDocs() {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(RERANK);
-        for (RankedDoc rankedDoc : rankedDocs) {
-            rankedDoc.toXContent(builder, params);
-        }
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(RERANK, rankedDocs.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java
index 1db6dcc802d00..dd8229c604ecb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/SparseEmbeddingResults.java
@@ -12,10 +12,12 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
 import org.elasticsearch.inference.InferenceResults;
 import org.elasticsearch.inference.InferenceServiceResults;
 import org.elasticsearch.inference.TaskType;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults;
@@ -23,6 +25,7 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -68,15 +71,8 @@ public static SparseEmbeddingResults of(List results
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray(SPARSE_EMBEDDING);
-
-        for (Embedding embedding : embeddings) {
-            embedding.toXContent(builder, params);
-        }
-
-        builder.endArray();
-        return builder;
+    public Iterator toXContentChunked(ToXContent.Params params) {
+        return ChunkedToXContentHelper.array(SPARSE_EMBEDDING, embeddings.iterator());
     }
 
     @Override

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
index 603531f0aedf9..b84aaa2bcfc1b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase;
+import org.elasticsearch.xpack.core.ml.AbstractChunkedBWCSerializationTestCase;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -18,7 +18,7 @@
 
 import static org.elasticsearch.TransportVersions.ML_RERANK_DOC_OPTIONAL;
 
-public class RankedDocsResultsTests extends AbstractBWCSerializationTestCase {
+public class RankedDocsResultsTests extends AbstractChunkedBWCSerializationTestCase {
 
     @Override
     protected Writeable.Reader instanceReader() {

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java
new file mode 100644
index 0000000000000..a23ce2c107fe3
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractChunkedBWCSerializationTestCase.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.ml;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ChunkedToXContent;
+import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase.DEFAULT_BWC_VERSIONS;
+
+public abstract class AbstractChunkedBWCSerializationTestCase extends
+    AbstractChunkedSerializingTestCase {
+
+    /**
+     * Returns the expected instance if serialized from the given version.
+     */
+    protected abstract T mutateInstanceForVersion(T instance, TransportVersion version);
+
+    /**
+     * The bwc versions to test serialization against
+     */
+    protected List bwcVersions() {
+        return DEFAULT_BWC_VERSIONS;
+    }
+
+    /**
+     * Test serialization and deserialization of the test instance across versions
+     */
+    public final void testBwcSerialization() throws IOException {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            T testInstance = createTestInstance();
+            for (TransportVersion bwcVersion : bwcVersions()) {
+                assertBwcSerialization(testInstance, bwcVersion);
+            }
+        }
+    }
+
+    /**
+     * Assert that instances copied at a particular version are equal. The version is useful
+     * for sanity checking the backwards compatibility of the wire. It isn't a substitute for
+     * real backwards compatibility tests but it is *so* much faster.
+     */
+    protected final void assertBwcSerialization(T testInstance, TransportVersion version) throws IOException {
+        T deserializedInstance = copyWriteable(testInstance, getNamedWriteableRegistry(), instanceReader(), version);
+        assertOnBWCObject(deserializedInstance, mutateInstanceForVersion(testInstance, version), version);
+    }
+
+    /**
+     * @param bwcSerializedObject The object deserialized from the previous version
+     * @param testInstance The original test instance
+     * @param version The version which serialized
+     */
+    protected void assertOnBWCObject(T bwcSerializedObject, T testInstance, TransportVersion version) {
+        assertNotSame(version.toString(), bwcSerializedObject, testInstance);
+        assertEquals(version.toString(), bwcSerializedObject, testInstance);
+        assertEquals(version.toString(), bwcSerializedObject.hashCode(), testInstance.hashCode());
+    }
+}

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java
index f4bbcbebf0340..f5c30d0a94c54 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
-import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.rest.action.RestChunkedToXContentListener;
 import org.elasticsearch.xpack.core.inference.action.InferenceAction;
 
 import java.io.IOException;
@@ -59,6 +59,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
             InferenceAction.Request.DEFAULT_TIMEOUT
         );
         requestBuilder.setInferenceTimeout(inferTimeout);
-        return channel -> client.execute(InferenceAction.INSTANCE, requestBuilder.build(), new RestToXContentListener<>(channel));
+        return channel -> client.execute(InferenceAction.INSTANCE, requestBuilder.build(), new RestChunkedToXContentListener<>(channel));
     }
 }

From 94c48ac589b52a46868801a9d06ae48bb175f3c5 Mon Sep 17 00:00:00 2001
From: Pat Whelan
Date: Tue, 20 Aug 2024 14:35:39 -0400
Subject: [PATCH 104/389] [ML] Remove HttpClientContext (#111914)

No requests are making use of this parameter, and it is clashing with
Bedrock. We will bring it back in a later change as part of the Http
Request portion of the Sender.
---
 ...AmazonBedrockExecuteOnlyRequestSender.java |  2 --
 .../external/http/retry/RequestSender.java    |  2 --
 .../http/retry/RetryingHttpSender.java        | 10 ++++--
 ...onBedrockChatCompletionRequestManager.java |  3 +-
 ...AmazonBedrockEmbeddingsRequestManager.java |  3 +-
 .../sender/ExecutableInferenceRequest.java    |  3 +-
 .../http/retry/RetryingHttpSenderTests.java   | 34 ++++++++-----------
 .../sender/RequestExecutorServiceTests.java   | 18 +++++-----
 .../http/sender/RequestManagerTests.java      |  2 --
 9 files changed, 35 insertions(+), 42 deletions(-)

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java
index a08acab655936..0826d990a80a5 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.amazonbedrock;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
@@ -45,7 +44,6 @@ public AmazonBedrockExecuteOnlyRequestSender(AmazonBedrockClientCache clientCach
     public void send(
         Logger logger,
         Request request,
-        HttpClientContext context,
        Supplier hasRequestTimedOutFunction,
         ResponseHandler responseHandler,
         ActionListener listener

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java
index 8244e5ad29e95..8e55b0988de6a 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RequestSender.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.http.retry;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.inference.InferenceServiceResults;
@@ -19,7 +18,6 @@ public interface RequestSender {
     void send(
         Logger logger,
         Request request,
-        HttpClientContext context,
         Supplier hasRequestTimedOutFunction,
         ResponseHandler responseHandler,
         ActionListener listener

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java
index dd45501564e4e..263bdea5ce368 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java
@@ -189,12 +189,18 @@ public boolean shouldRetry(Exception e) {
     public void send(
         Logger logger,
         Request request,
-        HttpClientContext context,
         Supplier hasRequestTimedOutFunction,
         ResponseHandler responseHandler,
         ActionListener listener
     ) {
-        InternalRetrier retrier = new InternalRetrier(logger, request, context, hasRequestTimedOutFunction, responseHandler, listener);
+        var retrier = new InternalRetrier(
+            logger,
+            request,
+            HttpClientContext.create(),
+            hasRequestTimedOutFunction,
+            responseHandler,
+            listener
+        );
         retrier.run();
     }

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java
index 8642a19b26a7d..1c6bb58717942 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.http.sender;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
@@ -52,7 +51,7 @@ public void execute(
         var responseHandler = new AmazonBedrockChatCompletionResponseHandler();
 
         try {
-            requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener);
+            requestSender.send(logger, request, hasRequestCompletedFunction, responseHandler, listener);
         } catch (Exception e) {
             var errorMessage = Strings.format(
                 "Failed to send [completion] request from inference entity id [%s]",

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java
index 2f94cdf342938..34aacbf67af6f 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.http.sender;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
@@ -61,7 +60,7 @@ public void execute(
         var responseHandler = new AmazonBedrockEmbeddingsResponseHandler();
         var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout);
         try {
-            requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener);
+            requestSender.send(logger, request, hasRequestCompletedFunction, responseHandler, listener);
         } catch (Exception e) {
             var errorMessage = Strings.format(
                 "Failed to send [text_embedding] request from inference entity id [%s]",

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java
index 214eba4ee3485..241466422e47b 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ExecutableInferenceRequest.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.http.sender;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
@@ -33,7 +32,7 @@ public void run() {
         var inferenceEntityId = request.createHttpRequest().inferenceEntityId();
 
         try {
-            requestSender.send(logger, request, HttpClientContext.create(), hasFinished, responseHandler, listener);
+            requestSender.send(logger, request, hasFinished, responseHandler, listener);
         } catch (Exception e) {
             var errorMessage = Strings.format("Failed to send request from inference entity id [%s]", inferenceEntityId);
             logger.warn(errorMessage, e);

diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java
index c2842a1278a49..f70ab43908827 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java
@@ -10,7 +10,6 @@
 import org.apache.http.ConnectionClosedException;
 import org.apache.http.HttpResponse;
 import org.apache.http.StatusLine;
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
@@ -80,7 +79,7 @@ public void testSend_CallsSenderAgain_AfterValidateResponseThrowsAnException() t
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -111,7 +110,7 @@ public void testSend_CallsSenderAgain_WhenAFailureStatusCodeIsReturned() throws
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -139,7 +138,7 @@ public void testSend_CallsSenderAgain_WhenParsingFailsOnce() throws IOException
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -167,7 +166,7 @@ public void testSend_DoesNotCallSenderAgain_WhenParsingFailsWithNonRetryableExce
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0);
 
         var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed"));
@@ -202,7 +201,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce()
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -235,7 +234,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnce_W
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -268,7 +267,7 @@ public void testSend_CallsSenderAgain_WhenHttpResultListenerCallsOnFailureOnceWi
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         assertThat(listener.actionGet(TIMEOUT), is(inferenceResults));
         verify(httpClient, times(2)).send(any(), any(), any());
@@ -295,7 +294,7 @@ public void testSend_ReturnsFailure_WhenHttpResultListenerCallsOnFailureOnceWith
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0);
 
         var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("Invalid host [null], please check that the URL is correct."));
@@ -317,10 +316,7 @@ public void testSend_ReturnsElasticsearchExceptionFailure_WhenTheHttpClientThrow
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(
-            () -> retrier.send(mock(Logger.class), mockRequest("id"), HttpClientContext.create(), () -> false, handler, listener),
-            0
-        );
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest("id"), () -> false, handler, listener), 0);
 
         var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("Http client failed to send request from inference entity id [id]"));
@@ -354,7 +350,7 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnException_AfterO
         var retrier = createRetrier(sender);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed again"));
@@ -391,7 +387,7 @@ public void testSend_ReturnsFailure_WhenValidateResponseThrowsAnElasticsearchExc
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed again"));
@@ -423,7 +419,7 @@ public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_AfterO
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 1);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 1);
 
         var thrownException = expectThrows(RetryException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed again"));
@@ -449,7 +445,7 @@ public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNo
         var retrier = createRetrier(httpClient);
 
         var listener = new PlainActionFuture();
-        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener), 0);
+        executeTasks(() -> retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener), 0);
 
         var thrownException = expectThrows(IllegalStateException.class, () -> listener.actionGet(TIMEOUT));
         assertThat(thrownException.getMessage(), is("failed"));
@@ -484,7 +480,7 @@ public void testSend_DoesNotRetryIndefinitely() throws IOException {
         );
 
         var listener = new PlainActionFuture();
-        retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener);
+        retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener);
 
         // Assert that the retrying sender stopped after max retires even though the exception is retryable
         var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT));
@@ -524,7 +520,7 @@ public void testSend_DoesNotRetryIndefinitely_WithAlwaysRetryingResponseHandler(
         );
 
         var listener = new PlainActionFuture();
-        retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener);
+        retrier.send(mock(Logger.class), mockRequest(), () -> false, handler, listener);
 
         // Assert that the retrying sender stopped after max retires
         var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT));

diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java
index 762a3a74184a4..e09e4968571e5 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java
@@ -123,7 +123,7 @@ public void testIsTerminated_AfterStopFromSeparateThread() {
             waitToShutdown.countDown();
             waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         var service = createRequestExecutorService(null, requestSender);
 
@@ -203,7 +203,7 @@ public void testTaskThrowsError_CallsOnFailure() {
         doAnswer(invocation -> {
             service.shutdown();
             throw new IllegalArgumentException("failed");
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         PlainActionFuture listener = new PlainActionFuture<>();
 
@@ -270,13 +270,13 @@ public void testExecute_PreservesThreadContext() throws InterruptedException, Ex
             assertNull(serviceThreadContext.getHeader(headerKey));
 
             @SuppressWarnings("unchecked")
-            ActionListener listener = (ActionListener) invocation.getArguments()[5];
+            ActionListener listener = invocation.getArgument(4, ActionListener.class);
             listener.onResponse(null);
 
             waitToShutdown.countDown();
             waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         var finishedOnResponse = new CountDownLatch(1);
         ActionListener listener = new ActionListener<>() {
@@ -422,7 +422,7 @@ public void testChangingCapacity_SetsCapacityToTwo() throws ExecutionException,
             waitToShutdown.countDown();
             waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service);
 
@@ -467,7 +467,7 @@ public void testChangingCapacity_DoesNotRejectsOverflowTasks_BecauseOfQueueFull(
             waitToShutdown.countDown();
             waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service);
 
@@ -528,7 +528,7 @@ public void testChangingCapacity_ToZero_SetsQueueCapacityToUnbounded() throws IO
             waitToShutdown.countDown();
             waitToReturnFromSend.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS);
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         Future executorTermination = submitShutdownRequest(waitToShutdown, waitToReturnFromSend, service);
 
@@ -598,11 +598,11 @@ public void testDoesNotExecuteTask_WhenCannotReserveTokens_AndThenCanReserve_And
         doAnswer(invocation -> {
             service.shutdown();
             return Void.TYPE;
-        }).when(requestSender).send(any(), any(), any(), any(), any(), any());
+        }).when(requestSender).send(any(), any(), any(), any(), any());
 
         service.start();
 
-        verify(requestSender, times(1)).send(any(), any(), any(), any(), any(), any());
+        verify(requestSender, times(1)).send(any(), any(), any(), any(), any());
     }
 
     public void testRemovesRateLimitGroup_AfterStaleDuration() {

diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java
index 8b7c01ae133cf..d8a1f2c4227e4 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.inference.external.http.sender;
 
-import org.apache.http.client.protocol.HttpClientContext;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.inference.InferenceServiceResults;
@@ -47,7 +46,6 @@ public static RequestManager createMock(RequestSender requestSender, String infe
             requestSender.send(
                 mock(Logger.class),
                 RequestTests.mockRequest(inferenceEntityId),
-                HttpClientContext.create(),
                 () -> false,
                 mock(ResponseHandler.class),
                 listener

From 8841c61ee347994b9afd4ff0af57f56a06ea52d3 Mon Sep 17 00:00:00 2001
From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com>
Date: Tue, 20 Aug 2024 14:44:56 -0400
Subject: [PATCH 105/389] EsqlCapabilities for casting string to version (#112032)

---
 muted-tests.yml                                            | 3 ---
 .../qa/testFixtures/src/main/resources/comparison.csv-spec | 1 +
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index b166fb137bc48..f59b1540aaf16 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -131,9 +131,6 @@ tests:
 - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT
   method: testSnapshotRestore {cluster=UPGRADED}
   issue: https://github.com/elastic/elasticsearch/issues/111799
-- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT
-  method: test {comparison.RangeVersion SYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/111814
 - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
   method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval hourly: #110916}"
   issue: https://github.com/elastic/elasticsearch/issues/111901

diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec
index ef07f1dae3c1a..e0b921947e16d 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec
@@ -181,6 +181,7 @@ emp_no:integer |first_name:keyword
 ;
 
 rangeVersion
+required_capability: string_literal_auto_casting_extended
 from apps
 | where version > "2" and version < "4"
 | keep id, version

From fe5fa0575b82d7ce2c4570335d3f4a4d364bec27 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 21 Aug 2024 05:16:20 +1000
Subject: [PATCH 106/389] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex SYNC} #112036

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index f59b1540aaf16..33a65c8a90e23 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -178,6 +178,9 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT
   method: testScaledFloat
   issue: https://github.com/elastic/elasticsearch/issues/112003
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+  method: test {mv_percentile.FromIndex SYNC}
+  issue: https://github.com/elastic/elasticsearch/issues/112036
 
 # Examples:
 #

From 94aa6ce1b262d6a41d51c4351249de8f3d714e76 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 21 Aug 2024 05:16:33 +1000
Subject: [PATCH 107/389] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex ASYNC} #112037

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 33a65c8a90e23..049ec985c7456 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -181,6 +181,9 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
   method: test {mv_percentile.FromIndex SYNC}
   issue: https://github.com/elastic/elasticsearch/issues/112036
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+  method: test {mv_percentile.FromIndex ASYNC}
+  issue: https://github.com/elastic/elasticsearch/issues/112037
 
 # Examples:
 #

From 29f9ad9a64e359362e6a40ebd05453c4bb15fc57 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 20 Aug 2024 12:45:20 -0700
Subject: [PATCH 108/389] Allow for negative epoch seconds (#111938)

The change to allow nanoseconds in ZonedDateTime split the epoch seconds
from the nanosecond subelement. However, the epoch seconds were then
written as a vlong, when in fact they could be negative if the date is
before epoch. This commit changes the format to use zlong instead, which
supports negatives.

closes #111923
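The fix works because the two encodings treat the sign bit differently: a
vlong is a plain varint over the raw two's-complement bits, so a negative
value sign-extends into the maximum-length encoding (or trips the writer's
non-negative assumption), while a zlong applies ZigZag mapping first so
values near zero in either direction stay short. A minimal sketch of the
idea, not Elasticsearch's literal implementation:

    // ZigZag interleaves signed values onto unsigned ones:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... so small negatives stay one byte.
    static long zigZagEncode(long n) {
        return (n << 1) ^ (n >> 63);   // arithmetic shift replicates the sign bit
    }

    static long zigZagDecode(long n) {
        return (n >>> 1) ^ -(n & 1);
    }

Hence the switch to writeZLong/readZLong below for epoch seconds, which may
be negative for dates before 1970, plus the renamed ZDT_NANOS_SUPPORT_BROKEN
transport version so nodes that already shipped the vlong format can still
be read.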
---
 .../org/elasticsearch/TransportVersions.java  |  3 ++-
 .../common/io/stream/StreamInput.java         |  7 +++--
 .../common/io/stream/StreamOutput.java        |  9 +++++--
 .../common/io/stream/AbstractStreamTests.java | 27 ++++++++++++-------
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 3bece535aab0f..fad57b3d6c854 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -189,10 +189,11 @@ static TransportVersion def(int id) {
     public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0);
     public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0);
     public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0);
-    public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0);
+    public static final TransportVersion ZDT_NANOS_SUPPORT_BROKEN = def(8_722_00_0);
     public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0);
     public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0);
     public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0);
+    public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,

diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index 8de49ded03a4e..c4c18cfd376ad 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -903,8 +903,11 @@ public final Instant readOptionalInstant() throws IOException {
     private ZonedDateTime readZonedDateTime() throws IOException {
         final String timeZoneId = readString();
         final Instant instant;
-        if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
-            instant = Instant.ofEpochSecond(readVLong(), readInt());
+        if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
+            // epoch seconds can be negative, but it was incorrectly first written as vlong
+            boolean zlong = getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT);
+            long seconds = zlong ? readZLong() : readVLong();
+            instant = Instant.ofEpochSecond(seconds, readInt());
         } else {
             instant = Instant.ofEpochMilli(readLong());
         }

diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index 9d5b9a107ee6a..c65ae2e3463d4 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -767,8 +767,13 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep
         final ZonedDateTime zonedDateTime = (ZonedDateTime) v;
         o.writeString(zonedDateTime.getZone().getId());
         Instant instant = zonedDateTime.toInstant();
-        if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
-            o.writeVLong(instant.getEpochSecond());
+        if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
+            // epoch seconds can be negative, but it was incorrectly first written as vlong
+            if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
+                o.writeZLong(instant.getEpochSecond());
+            } else {
+                o.writeVLong(instant.getEpochSecond());
+            }
             o.writeInt(instant.getNano());
         } else {
             o.writeLong(instant.toEpochMilli());

diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
index b4aa58ae13f7b..8451d2fd64b9c 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
@@ -10,7 +10,6 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.TransportVersion;
-import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -53,6 +52,8 @@
 
 import static java.time.Instant.ofEpochSecond;
 import static java.time.ZonedDateTime.ofInstant;
+import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT;
+import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT_BROKEN;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasToString;
@@ -726,11 +727,15 @@ public void testReadAfterReachingEndOfStream() throws IOException {
     }
 
     public void testZonedDateTimeSerialization() throws IOException {
-        checkZonedDateTimeSerialization(TransportVersions.ZDT_NANOS_SUPPORT);
+        checkZonedDateTimeSerialization(ZDT_NANOS_SUPPORT);
+    }
+
+    public void testZonedDateTimeMillisBwcSerializationV1() throws IOException {
+        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT_BROKEN));
     }
 
     public void testZonedDateTimeMillisBwcSerialization() throws IOException {
-        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(TransportVersions.ZDT_NANOS_SUPPORT));
+        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT));
     }
 
     public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOException {
@@ -738,14 +743,18 @@ public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOExcept
         assertGenericRoundtrip(ofInstant(ofEpochSecond(1), randomZone()), tv);
         // just want to test a large number that will use 5+ bytes
         long maxEpochSecond = Integer.MAX_VALUE;
+        long minEpochSecond = tv.between(ZDT_NANOS_SUPPORT_BROKEN, ZDT_NANOS_SUPPORT) ? 0 : Integer.MIN_VALUE;
         assertGenericRoundtrip(ofInstant(ofEpochSecond(maxEpochSecond), randomZone()), tv);
-        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond)), randomZone()), tv);
-        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 1_000_000), randomZone()), tv);
-        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_000_000), randomZone()), tv);
-        if (tv.onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
-            assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), 999_999_999), randomZone()), tv);
+        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond)), randomZone()), tv);
+        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 1_000_000), randomZone()), tv);
+        assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_000_000), randomZone()), tv);
+        if (tv.onOrAfter(ZDT_NANOS_SUPPORT)) {
+            assertGenericRoundtrip(
+                ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_999_999), randomZone()),
+                tv
+            );
             assertGenericRoundtrip(
-                ofInstant(ofEpochSecond(randomLongBetween(0, maxEpochSecond), randomIntBetween(0, 999_999_999)), randomZone()),
+                ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), randomIntBetween(0, 999_999_999)), randomZone()),
                 tv
             );
         }

From 3a122a7d71a74346d8fd2d78130c305b0f8266bb Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 21 Aug 2024 06:24:58 +1000
Subject: [PATCH 109/389] Mute org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT testForceSleepsProfile {SYNC} #112039

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 049ec985c7456..e206d7229083a 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -184,6 +184,9 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
   method: test {mv_percentile.FromIndex ASYNC}
   issue: https://github.com/elastic/elasticsearch/issues/112037
+- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT
+  method: testForceSleepsProfile {SYNC}
+  issue: https://github.com/elastic/elasticsearch/issues/112039
 
 # Examples:
 #

From c1019d4c5d05c5f8b0b7b7dfd57ff50a64013e23 Mon Sep 17 00:00:00 2001
From: Stef Nestor <26751266+stefnestor@users.noreply.github.com>
Date: Tue, 20 Aug 2024 14:58:18 -0600
Subject: [PATCH 110/389] (Doc+) Link API doc to parent object - part1 (#111951)

* (Doc+) Link API to parent Doc part1

---------

Co-authored-by: shainaraskas
Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com>
---
 docs/reference/autoscaling/apis/autoscaling-apis.asciidoc    | 2 +-
 .../autoscaling/apis/delete-autoscaling-policy.asciidoc      | 2 +-
 .../autoscaling/apis/get-autoscaling-capacity.asciidoc       | 2 +-
 .../autoscaling/apis/get-autoscaling-policy.asciidoc         | 2 +-
 .../autoscaling/apis/put-autoscaling-policy.asciidoc         | 2 +-
 docs/reference/autoscaling/deciders/fixed-decider.asciidoc   | 2 +-
 .../autoscaling/deciders/frozen-existence-decider.asciidoc   | 2 +-
 .../autoscaling/deciders/frozen-shards-decider.asciidoc      | 2 +-
 .../autoscaling/deciders/frozen-storage-decider.asciidoc     | 2 +-
 .../autoscaling/deciders/machine-learning-decider.asciidoc   | 2 +-
 .../autoscaling/deciders/proactive-storage-decider.asciidoc  | 2 +-
 .../autoscaling/deciders/reactive-storage-decider.asciidoc   | 2 +-
 docs/reference/autoscaling/index.asciidoc                    | 2 +-
 .../apis/delete-analytics-collection.asciidoc                | 2 +-
 docs/reference/behavioral-analytics/apis/index.asciidoc      | 2 +-
 .../apis/list-analytics-collection.asciidoc                  | 2 +-
 .../apis/post-analytics-collection-event.asciidoc            | 2 +-
 .../apis/put-analytics-collection.asciidoc                   | 2 +-
 .../ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc | 2 +-
 .../ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc    | 2 +-
 .../ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc  | 2 +-
 .../ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc    | 2 +-
 .../ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc | 2 +-
 docs/reference/ccr/apis/ccr-apis.asciidoc                    | 2 +-
 docs/reference/ccr/apis/follow/get-follow-info.asciidoc      | 2 +-
 docs/reference/ccr/apis/follow/get-follow-stats.asciidoc     | 2 +-
 docs/reference/ccr/apis/follow/post-forget-follower.asciidoc | 2 +-
 docs/reference/ccr/apis/follow/post-pause-follow.asciidoc    | 2 +-
 docs/reference/ccr/apis/follow/post-resume-follow.asciidoc   | 2 +-
 docs/reference/ccr/apis/follow/post-unfollow.asciidoc        | 2 +-
 docs/reference/ccr/apis/follow/put-follow.asciidoc           | 2 +-
 docs/reference/ccr/apis/get-ccr-stats.asciidoc               | 2 +-
 docs/reference/cluster/allocation-explain.asciidoc           | 2 +-
 docs/reference/cluster/delete-desired-balance.asciidoc       | 2 +-
 docs/reference/cluster/get-desired-balance.asciidoc          | 2 +-
 .../data-streams/change-mappings-and-settings.asciidoc       | 2 +-
 docs/reference/data-streams/downsampling-manual.asciidoc     | 4 ++--
 .../data-streams/lifecycle/apis/delete-lifecycle.asciidoc    | 2 +-
 .../data-streams/lifecycle/apis/explain-lifecycle.asciidoc   | 2 +-
 .../data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc | 2 +-
 .../data-streams/lifecycle/apis/get-lifecycle.asciidoc       | 2 +-
 .../data-streams/lifecycle/apis/put-lifecycle.asciidoc       | 2 +-
 .../tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc    | 4 ++--
 docs/reference/data-streams/modify-data-streams-api.asciidoc | 2 +-
 docs/reference/data-streams/promote-data-stream-api.asciidoc | 2 +-
 docs/reference/data-streams/tsds-reindex.asciidoc            | 2 +-
 docs/reference/eql/eql-apis.asciidoc                         | 2 +-
 docs/reference/esql/esql-apis.asciidoc                       | 2 +-
 docs/reference/esql/esql-async-query-delete-api.asciidoc     | 2 +-
 docs/reference/ilm/apis/delete-lifecycle.asciidoc            | 2 +-
 docs/reference/ilm/apis/explain.asciidoc                     | 2 +-
 docs/reference/ilm/apis/get-lifecycle.asciidoc               | 2 +-
 docs/reference/ilm/apis/get-status.asciidoc                  |
docs/reference/ilm/apis/move-to-step.asciidoc | 2 +- docs/reference/ilm/apis/put-lifecycle.asciidoc | 2 +- docs/reference/ilm/apis/remove-policy-from-index.asciidoc | 2 +- docs/reference/ilm/apis/retry-policy.asciidoc | 2 +- docs/reference/ilm/apis/start.asciidoc | 2 +- docs/reference/ilm/apis/stop.asciidoc | 2 +- docs/reference/ilm/error-handling.asciidoc | 2 +- docs/reference/ilm/ilm-index-lifecycle.asciidoc | 2 +- 61 files changed, 63 insertions(+), 63 deletions(-) diff --git a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc index 090eda5ef5436..e4da2c45ee978 100644 --- a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc +++ b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -You can use the following APIs to perform autoscaling operations. +You can use the following APIs to perform {cloud}/ec-autoscaling.html[autoscaling operations]. [discrete] [[autoscaling-api-top-level]] diff --git a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc index 608b7bd7cb903..190428485a003 100644 --- a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Delete autoscaling policy. +Delete {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-delete-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc index 05724b9c48b6e..d635d8c8f7bd0 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling capacity. +Get {cloud}/ec-autoscaling.html[autoscaling] capacity. [[autoscaling-get-autoscaling-capacity-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc index ad00d69d1aeb2..973eedcb361c9 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling policy. +Get {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-get-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc index ff79def51ebb9..e564f83411eb4 100644 --- a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Creates or updates an autoscaling policy. +Creates or updates an {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-put-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc index c46d1dffe2cc8..5a8b009d9f063 100644 --- a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc @@ -6,7 +6,7 @@ experimental[] [WARNING] The fixed decider is intended for testing only. 
Do not use this decider in production. -The `fixed` decider responds with a fixed required capacity. It is not enabled +The {cloud}/ec-autoscaling.html[autoscaling] `fixed` decider responds with a fixed required capacity. It is not enabled by default but can be enabled for any policy by explicitly configuring it. ==== Configuration settings diff --git a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc index 832cf330053aa..0fc9ad444a213 100644 --- a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-existence-decider]] === Frozen existence decider -The frozen existence decider (`frozen_existence`) ensures that once the first +The {cloud}/ec-autoscaling.html[autoscaling] frozen existence decider (`frozen_existence`) ensures that once the first index enters the frozen ILM phase, the frozen tier is scaled into existence. The frozen existence decider is enabled for all policies governing frozen data diff --git a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc index ab11da04c8642..1977f95797ef0 100644 --- a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-shards-decider]] === Frozen shards decider -The frozen shards decider (`frozen_shards`) calculates the memory required to search +The {cloud}/ec-autoscaling.html[autoscaling] frozen shards decider (`frozen_shards`) calculates the memory required to search the current set of partially mounted indices in the frozen tier. Based on a required memory amount per shard, it calculates the necessary memory in the frozen tier. diff --git a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc index 5a10f31f1365b..3a8e7cdb518b3 100644 --- a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-storage-decider]] === Frozen storage decider -The frozen storage decider (`frozen_storage`) calculates the local storage +The {cloud}/ec-autoscaling.html[autoscaling] frozen storage decider (`frozen_storage`) calculates the local storage required to search the current set of partially mounted indices based on a percentage of the total data set size of such indices. It signals that additional storage capacity is necessary when existing capacity is less than the diff --git a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc index 26ced6ad7bb26..5432d96a47edb 100644 --- a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-machine-learning-decider]] === Machine learning decider -The {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} +The {cloud}/ec-autoscaling.html[autoscaling] {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} jobs and trained models. The {ml} decider is enabled for policies governing `ml` nodes. 
diff --git a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc index 763f1de96f6b9..33c989f3b12eb 100644 --- a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-proactive-storage-decider]] === Proactive storage decider -The proactive storage decider (`proactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] proactive storage decider (`proactive_storage`) calculates the storage required to contain the current data set plus an estimated amount of expected additional data. The proactive storage decider is enabled for all policies governing nodes with the `data_hot` role. diff --git a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc index 50897178a88de..7c38df75169fd 100644 --- a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-reactive-storage-decider]] === Reactive storage decider -The reactive storage decider (`reactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] reactive storage decider (`reactive_storage`) calculates the storage required to contain the current data set. It signals that additional storage capacity is necessary when existing capacity has been exceeded (reactively). diff --git a/docs/reference/autoscaling/index.asciidoc b/docs/reference/autoscaling/index.asciidoc index fbf1a9536973e..e70c464889419 100644 --- a/docs/reference/autoscaling/index.asciidoc +++ b/docs/reference/autoscaling/index.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -The autoscaling feature enables an operator to configure tiers of nodes that +The {cloud}/ec-autoscaling.html[autoscaling] feature enables an operator to configure tiers of nodes that self-monitor whether or not they need to scale based on an operator-defined policy. Then, via the autoscaling API, an Elasticsearch cluster can report whether or not it needs additional resources to meet the policy. For example, an diff --git a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc index 9b15bcca3fc85..a6894a933b460 100644 --- a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc @@ -17,7 +17,7 @@ PUT _application/analytics/my_analytics_collection //// -Removes an Analytics Collection and its associated data stream. +Removes a <> Collection and its associated data stream. 
[[delete-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/index.asciidoc b/docs/reference/behavioral-analytics/apis/index.asciidoc index 042b50259b1bb..692d3374f89f5 100644 --- a/docs/reference/behavioral-analytics/apis/index.asciidoc +++ b/docs/reference/behavioral-analytics/apis/index.asciidoc @@ -9,7 +9,7 @@ beta::[] --- -Use the following APIs to manage tasks and resources related to Behavioral Analytics: +Use the following APIs to manage tasks and resources related to <>: * <> * <> diff --git a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc index 8d2491ff8a6ee..14511a1258278 100644 --- a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc @@ -24,7 +24,7 @@ DELETE _application/analytics/my_analytics_collection2 // TEARDOWN //// -Returns information about Analytics Collections. +Returns information about <> Collections. [[list-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc index 84d9cb5351799..f82717e22ed34 100644 --- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc +++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc @@ -22,7 +22,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Post an event to an Analytics Collection. +Post an event to a <> Collection. [[post-analytics-collection-event-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc index 48273fb3906c4..cbbab2ae3e26c 100644 --- a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc @@ -16,7 +16,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Creates an Analytics Collection. +Creates a <> Collection. [[put-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 1c72fb8742b93..b510163bab50b 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Delete auto-follow pattern ++++ -Delete auto-follow patterns. +Delete {ccr} <>. [[ccr-delete-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 46ef288b05088..a2969e993ddfb 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Get auto-follow pattern ++++ -Get auto-follow patterns. +Get {ccr} <>. 
[[ccr-get-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc index 1e64ab813e2ad..c5ae5a7b4af9d 100644 --- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Pause auto-follow pattern ++++ -Pauses an auto-follow pattern. +Pauses a {ccr} <>. [[ccr-pause-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index d08997068f705..6769f21ca5cef 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Create auto-follow pattern ++++ -Creates an auto-follow pattern. +Creates a {ccr} <>. [[ccr-put-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc index 04da9b4a35ba0..a580bb3838f9b 100644 --- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Resume auto-follow pattern ++++ -Resumes an auto-follow pattern. +Resumes a {ccr} <>. [[ccr-resume-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 0c9f033639eda..ae94e1931af85 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -2,7 +2,7 @@ [[ccr-apis]] == {ccr-cap} APIs -You can use the following APIs to perform {ccr} operations. +You can use the following APIs to perform <> operations. [discrete] [[ccr-api-top-level]] diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 68fd6e210f884..6c049d9c92b59 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -5,7 +5,7 @@ Get follower info ++++ -Retrieves information about all follower indices. +Retrieves information about all <> follower indices. [[ccr-get-follow-info-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 72224cc7f51f4..4892f86b3523d 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -5,7 +5,7 @@ Get follower stats ++++ -Get follower stats. +Get <> follower stats. [[ccr-get-follow-stats-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index ea7e8640056bf..1917c08d6640d 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -5,7 +5,7 @@ Forget follower ++++ -Removes the follower retention leases from the leader. +Removes the <> follower retention leases from the leader. 
[[ccr-post-forget-follower-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index a4ab69aba8d84..6d4730d10efe6 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -5,7 +5,7 @@ Pause follower ++++ -Pauses a follower index. +Pauses a <> follower index. [[ccr-post-pause-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 47ba51a3fb8a0..b023a8cb5cb70 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -5,7 +5,7 @@ Resume follower ++++ -Resumes a follower index. +Resumes a <> follower index. [[ccr-post-resume-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index b96777b455d3b..dab11ef9e7a54 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -5,7 +5,7 @@ Unfollow ++++ -Converts a follower index to a regular index. +Converts a <> follower index to a regular index. [[ccr-post-unfollow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index eb83e2a13dcf1..b7ae9ac987474 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -5,7 +5,7 @@ Create follower ++++ -Creates a follower index. +Creates a <> follower index. [[ccr-put-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 128df5e47c777..92e6bae0bdce8 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,7 +6,7 @@ Get {ccr-init} stats ++++ -Get {ccr} stats. +Get <> stats. [[ccr-get-stats-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 0b0fde6546c29..809c9d74f1450 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -4,7 +4,7 @@ Cluster allocation explain ++++ -Provides an explanation for a shard's current allocation. +Provides an explanation for a shard's current <>. [source,console] ---- diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc index f81dcab011da4..c67834269e505 100644 --- a/docs/reference/cluster/delete-desired-balance.asciidoc +++ b/docs/reference/cluster/delete-desired-balance.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -Discards the current desired balance and computes a new desired balance starting from the current allocation of shards. +Discards the current <> and computes a new desired balance starting from the current allocation of shards. 
This can sometimes help {es} find a desired balance which needs fewer shard movements to achieve, especially if the cluster has experienced changes so substantial that the current desired balance is no longer optimal without {es} having detected that the current desired balance will take more shard movements to achieve than needed. However, this API diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 3fd87dcfedc4f..74afdaa52daf1 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -8,7 +8,7 @@ NOTE: {cloud-only} Exposes: -* the desired balance computation and reconciliation stats +* the <> computation and reconciliation stats * balancing stats such as distribution of shards, disk and ingest forecasts across nodes and data tiers (based on the current cluster state) * routing table with each shard current and desired location diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 076b315558b60..1290f289e5bbd 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -5,7 +5,7 @@ [[data-streams-change-mappings-and-settings]] === Change mappings and settings for a data stream -Each data stream has a <> has a <>. Mappings and index settings from this template are applied to new backing indices created for the stream. This includes the stream's first backing index, which is auto-generated when the stream is created. diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 771a08d97d949..44ae77d072034 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -14,7 +14,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline // TEARDOWN //// -The recommended way to downsample a time series data stream (TSDS) is +The recommended way to <> a <> is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you how, using typical Kubernetes cluster monitoring data. @@ -32,7 +32,7 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a <> directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index f20c949c2fbc8..315f7fa85e45f 100644 --- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -4,7 +4,7 @@ Delete Data Stream Lifecycle ++++ -Deletes the lifecycle from a set of data streams. +Deletes the <> from a set of data streams. 
[[delete-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 7968bb78939e8..2b15886ebe192 100644 --- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -4,7 +4,7 @@ Explain Data Stream Lifecycle ++++ -Retrieves the current data stream lifecycle status for one or more data stream backing indices. +Retrieves the current <> status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc index a99fa19d9db8d..f48fa1eb52daa 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets stats about the execution of data stream lifecycle. +Gets stats about the execution of <>. [[get-lifecycle-stats-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 331285af395b6..c83572a4e0795 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets the lifecycle of a set of data streams. +Gets the <> of a set of <>. [[get-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 7d33a5b5f880c..c60c105e818ab 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -4,7 +4,7 @@ Put Data Stream Lifecycle ++++ -Configures the data stream lifecycle for the targeted data streams. +Configures the data stream <> for the targeted <>. [[put-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 5b2e2a1ec70a2..8d959d8f4ad84 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -2,8 +2,8 @@ [[tutorial-migrate-data-stream-from-ilm-to-dsl]] === Tutorial: Migrate ILM managed data stream to data stream lifecycle -In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to -data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from <> to +<>. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, the new backing indices will be managed by data stream lifecycle. 
This way, a data stream is gradually migrated away from being managed by {ilm-init} to diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc index f05e76e67c32f..2da869083df22 100644 --- a/docs/reference/data-streams/modify-data-streams-api.asciidoc +++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc @@ -4,7 +4,7 @@ Modify data streams ++++ -Performs one or more data stream modification actions in a single atomic +Performs one or more <> modification actions in a single atomic operation. [source,console] diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc index 281e9b549abcb..111c7a2256f8a 100644 --- a/docs/reference/data-streams/promote-data-stream-api.asciidoc +++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc @@ -5,7 +5,7 @@ Promote data stream ++++ -The purpose of the promote data stream api is to turn +The purpose of the promote <> API is to turn a data stream that is replicated by CCR into a regular data stream. diff --git a/docs/reference/data-streams/tsds-reindex.asciidoc b/docs/reference/data-streams/tsds-reindex.asciidoc index ea4ba16df5c4a..9d6594db4e779 100644 --- a/docs/reference/data-streams/tsds-reindex.asciidoc +++ b/docs/reference/data-streams/tsds-reindex.asciidoc @@ -9,7 +9,7 @@ [[tsds-reindex-intro]] ==== Introduction -With reindexing, you can copy documents from an old time-series data stream (TSDS) to a new one. Data streams support +With reindexing, you can copy documents from an old <> to a new one. Data streams support reindexing in general, with a few <>. Still, time-series data streams introduce additional challenges due to tight control on the accepted timestamp range for each backing index they contain. Direct use of the reindex API would likely error out due to attempting to insert documents with timestamps that are diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc index d3f591ccfe6c1..e8cc2b21492ae 100644 --- a/docs/reference/eql/eql-apis.asciidoc +++ b/docs/reference/eql/eql-apis.asciidoc @@ -1,7 +1,7 @@ [[eql-apis]] == EQL APIs -Event Query Language (EQL) is a query language for event-based time series data, +<> is a query language for event-based time series data, such as logs, metrics, and traces. For an overview of EQL and related tutorials, see <>. diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc index 686a71506bc14..8586cd1ae6bce 100644 --- a/docs/reference/esql/esql-apis.asciidoc +++ b/docs/reference/esql/esql-apis.asciidoc @@ -1,7 +1,7 @@ [[esql-apis]] == {esql} APIs -The {es} Query Language ({esql}) provides a powerful way to filter, transform, +The <> provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. For an overview of {esql} and related tutorials, see <>. diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc index 90f8c06b9124a..5cad566f7f9c0 100644 --- a/docs/reference/esql/esql-async-query-delete-api.asciidoc +++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query delete API ++++ -The {esql} async query delete API is used to manually delete an async query +The <> async query delete API is used to manually delete an async query by ID. 
If the query is still running, the query will be cancelled. Otherwise, the stored results are deleted. diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 632cb982b3968..fc9a35e4ef570 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -5,7 +5,7 @@ Delete policy ++++ -Deletes an index lifecycle policy. +Deletes an index <> policy. [[ilm-delete-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 348a9e7f99e78..a1ddde8c9f2d9 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -5,7 +5,7 @@ Explain lifecycle ++++ -Retrieves the current lifecycle status for one or more indices. For data +Retrieves the current <> status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 7443610065487..b4e07389a9fb7 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -5,7 +5,7 @@ Get policy ++++ -Retrieves a lifecycle policy. +Retrieves a <> policy. [[ilm-get-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 7e9e963f6f369..f2ab8d65ec9a1 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -7,7 +7,7 @@ Get {ilm} status ++++ -Retrieves the current {ilm} ({ilm-init}) status. +Retrieves the current <> ({ilm-init}) status. You can start or stop {ilm-init} with the <> and <> APIs. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 19cc9f7088867..f3441fa997cff 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -5,7 +5,7 @@ Move to step ++++ -Triggers execution of a specific step in the lifecycle policy. +Triggers execution of a specific step in the <> policy. [[ilm-move-to-step-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index ffd59a14d8c25..390f6b1bb4d15 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -5,7 +5,7 @@ Create or update lifecycle policy ++++ -Creates or updates lifecycle policy. See <> for +Creates or updates <> policy. See <> for definitions of policy components. [[ilm-put-lifecycle-request]] diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 711eccc298df1..107cab4d5aa19 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -5,7 +5,7 @@ Remove policy ++++ -Removes assigned lifecycle policies from an index or a data stream's backing +Removes assigned <> policies from an index or a data stream's backing indices. 
[[ilm-remove-policy-request]] diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index cb2587fbb151b..8f01f15e0c3ad 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -5,7 +5,7 @@ Retry policy ++++ -Retry executing the policy for an index that is in the ERROR step. +Retry executing the <> policy for an index that is in the ERROR step. [[ilm-retry-policy-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index 32db585c6b14c..c38b3d9ca8831 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -7,7 +7,7 @@ Start {ilm} ++++ -Start the {ilm} ({ilm-init}) plugin. +Start the <> ({ilm-init}) plugin. [[ilm-start-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index 1e9cfb94d0b1f..a6100d794c2d3 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -7,7 +7,7 @@ Stop {ilm} ++++ -Stop the {ilm} ({ilm-init}) plugin. +Stop the <> ({ilm-init}) plugin. [[ilm-stop-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index d922fa6687823..f810afc6c2b5f 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -2,7 +2,7 @@ [[index-lifecycle-error-handling]] == Troubleshooting {ilm} errors -When {ilm-init} executes a lifecycle policy, it's possible for errors to occur +When <> executes a lifecycle policy, it's possible for errors to occur while performing the necessary index operations for a step. When this happens, {ilm-init} moves the index to an `ERROR` step. If {ilm-init} cannot resolve the error automatically, execution is halted diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index acf59645dae13..040e02742f5e7 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -5,7 +5,7 @@ Index lifecycle ++++ -{ilm-init} defines five index lifecycle _phases_: +<> defines five index lifecycle _phases_: * **Hot**: The index is actively being updated and queried. * **Warm**: The index is no longer being updated but is still being queried. From 3b8c9ad5dd2467298cedfad7caa6977b3a81cbda Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 21 Aug 2024 09:51:26 +0700 Subject: [PATCH 111/389] Pin default codec to index mode. (#111997) By default, the 'default' codec is used; for indices in the logsdb index mode, the best_compression codec is used as the default instead.
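As a minimal sketch of the resulting behaviour (an illustration, not part of the patch; it assumes the `EngineConfig`, `Settings`, and `CodecService` classes touched in the diffs below, and that `CodecService.BEST_COMPRESSION_CODEC` is the string "best_compression"):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.EngineConfig;

class CodecDefaultSketch {
    public static void main(String[] args) {
        // index.mode now drives the index.codec default: the setting's default
        // supplier reads IndexSettings.MODE from the same Settings instance.
        Settings logsdb = Settings.builder().put("index.mode", "logsdb").build();
        // Resolves to "best_compression" (CodecService.BEST_COMPRESSION_CODEC):
        System.out.println(EngineConfig.INDEX_CODEC_SETTING.get(logsdb));
        // With no index.mode set, resolves to "default" (CodecService.DEFAULT_CODEC):
        System.out.println(EngineConfig.INDEX_CODEC_SETTING.get(Settings.EMPTY));
    }
}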
--- .../org/elasticsearch/index/IndexMode.java | 10 +++++++ .../codec/zstd/Zstd814StoredFieldsFormat.java | 4 +++ .../index/engine/EngineConfig.java | 8 ++++-- ...cTests.java => CodecIntegrationTests.java} | 27 ++++++++++++++++++- 4 files changed, 46 insertions(+), 3 deletions(-) rename server/src/test/java/org/elasticsearch/index/codec/{LegacyCodecTests.java => CodecIntegrationTests.java} (51%) diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 49eb6d84f0b1e..b137cfe27a514 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DocumentDimensions; @@ -297,6 +298,11 @@ public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) { public boolean isSyntheticSourceEnabled() { return true; } + + @Override + public String getDefaultCodec() { + return CodecService.BEST_COMPRESSION_CODEC; + } }; private static void validateTimeSeriesSettings(Map, Object> settings) { @@ -466,6 +472,10 @@ public String getName() { */ public abstract boolean isSyntheticSourceEnabled(); + public String getDefaultCodec() { + return CodecService.DEFAULT_CODEC; + } + /** * Parse a string into an {@link IndexMode}. */ diff --git a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java index b827bb6436f07..840b37611374a 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/zstd/Zstd814StoredFieldsFormat.java @@ -78,6 +78,10 @@ public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOCo return super.fieldsWriter(directory, si, context); } + public Mode getMode() { + return mode; + } + private static class ZstdCompressionMode extends CompressionMode { private final int level; diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 079d6479a63e4..317adcc67cf59 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecProvider; import org.elasticsearch.index.codec.CodecService; @@ -96,7 +97,10 @@ public Supplier retentionLeasesSupplier() { * This setting is also settable on the node and the index level, it's commonly used in hot/cold node archs where index is likely * allocated on both `kind` of nodes. 
*/ - public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { + public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", settings -> { + IndexMode indexMode = IndexSettings.MODE.get(settings); + return indexMode.getDefaultCodec(); + }, s -> { switch (s) { case CodecService.DEFAULT_CODEC: case CodecService.LEGACY_DEFAULT_CODEC: @@ -181,7 +185,7 @@ public EngineConfig( this.similarity = similarity; this.codecProvider = codecProvider; this.eventListener = eventListener; - codecName = indexSettings.getValue(INDEX_CODEC_SETTING); + this.codecName = indexSettings.getValue(INDEX_CODEC_SETTING); this.mapperService = mapperService; // We need to make the indexing buffer for this shard at least as large // as the amount of memory that is available for all engines on the diff --git a/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java similarity index 51% rename from server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java rename to server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java index dbe83af1a0cfb..05b9cf42e6236 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/LegacyCodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecIntegrationTests.java @@ -9,11 +9,12 @@ package org.elasticsearch.index.codec; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.codec.zstd.Zstd814StoredFieldsFormat; import org.elasticsearch.test.ESSingleNodeTestCase; import static org.hamcrest.Matchers.equalTo; -public class LegacyCodecTests extends ESSingleNodeTestCase { +public class CodecIntegrationTests extends ESSingleNodeTestCase { public void testCanConfigureLegacySettings() { assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); @@ -26,4 +27,28 @@ public void testCanConfigureLegacySettings() { codec = client().admin().indices().prepareGetSettings("index2").execute().actionGet().getSetting("index2", "index.codec"); assertThat(codec, equalTo("legacy_best_compression")); } + + public void testDefaultCodecLogsdb() { + assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); + + var indexService = createIndex("index1", Settings.builder().put("index.mode", "logsdb").build()); + var storedFieldsFormat = (Zstd814StoredFieldsFormat) indexService.getShard(0) + .getEngineOrNull() + .config() + .getCodec() + .storedFieldsFormat(); + assertThat(storedFieldsFormat.getMode(), equalTo(Zstd814StoredFieldsFormat.Mode.BEST_COMPRESSION)); + } + + public void testDefaultCodec() { + assumeTrue("Only when zstd_stored_fields feature flag is enabled", CodecService.ZSTD_STORED_FIELDS_FEATURE_FLAG.isEnabled()); + + var indexService = createIndex("index1"); + var storedFieldsFormat = (Zstd814StoredFieldsFormat) indexService.getShard(0) + .getEngineOrNull() + .config() + .getCodec() + .storedFieldsFormat(); + assertThat(storedFieldsFormat.getMode(), equalTo(Zstd814StoredFieldsFormat.Mode.BEST_SPEED)); + } } From 3153bd0c63e667b6328b9e96b9fdd2876c2b1188 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:32:43 +1000 Subject: [PATCH 112/389] Mute org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT testForceSleepsProfile {ASYNC} #112049 --- muted-tests.yml 
| 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index e206d7229083a..011de13677436 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -187,6 +187,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT method: testForceSleepsProfile {SYNC} issue: https://github.com/elastic/elasticsearch/issues/112039 +- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT + method: testForceSleepsProfile {ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/112049 # Examples: # From a294265bfd2b2e1097eea24c3a51dc6aacd11085 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 21 Aug 2024 09:34:08 +0300 Subject: [PATCH 113/389] Re-enable xpack yaml tests (#112031) --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 011de13677436..5a92196e4e51b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -173,8 +173,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT issue: https://github.com/elastic/elasticsearch/issues/111923 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - issue: https://github.com/elastic/elasticsearch/issues/111944 - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT method: testScaledFloat issue: https://github.com/elastic/elasticsearch/issues/112003 From a121fcb791705d5b1215665e0525260dee6212de Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 21 Aug 2024 08:41:07 +0200 Subject: [PATCH 114/389] Fix validation exception message if adaptive allocations is behind feature flag (#111970) --- .../ElasticsearchInternalServiceSettings.java | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index 1acf19c5373b7..8de791325a6df 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -16,6 +16,7 @@ import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -87,12 +88,18 @@ protected static ElasticsearchInternalServiceSettings.Builder fromMap( String modelId = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); if (numAllocations == null && adaptiveAllocationsSettings == null) { - validationException.addValidationError( - ServiceUtils.missingOneOfSettingsErrorMsg( - List.of(NUM_ALLOCATIONS, ADAPTIVE_ALLOCATIONS), - ModelConfigurations.SERVICE_SETTINGS - ) - ); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + validationException.addValidationError( + ServiceUtils.missingOneOfSettingsErrorMsg( + List.of(NUM_ALLOCATIONS,
ADAPTIVE_ALLOCATIONS), + ModelConfigurations.SERVICE_SETTINGS + ) + ); + } else { + validationException.addValidationError( + ServiceUtils.missingSettingErrorMsg(NUM_ALLOCATIONS, ModelConfigurations.SERVICE_SETTINGS) + ); + } } // if an error occurred while parsing, we'll set these to an invalid value, so we don't accidentally get a From 0d38528e0e6f01010ba4a6d11d84ff7a5ddf32f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Wed, 21 Aug 2024 09:11:48 +0200 Subject: [PATCH 115/389] Fix testLicenseTombstoneWithUsedTrialFromXContext (#112051) Relates: https://github.com/elastic/elasticsearch/issues/103093 --- .../license/LicensesMetadataSerializationTests.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java index be43705984435..e2218dfab1f1c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; @@ -121,13 +120,12 @@ public void testLicenseTombstoneFromXContext() throws Exception { assertThat(metadataFromXContent.getLicense(), equalTo(LicensesMetadata.LICENSE_TOMBSTONE)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103093") public void testLicenseTombstoneWithUsedTrialFromXContext() throws Exception { final XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); builder.startObject("licenses"); builder.nullField("license"); - builder.field("trial_license", Version.CURRENT.toString()); + builder.field("trial_license", TrialLicenseVersion.CURRENT); builder.endObject(); builder.endObject(); LicensesMetadata metadataFromXContent = getLicensesMetadataFromXContent(createParser(builder)); From 2681bb867d970048553ead7a38e995bf88616e90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Wed, 21 Aug 2024 10:18:21 +0200 Subject: [PATCH 116/389] ESQL: Added exception catching to aggregators (#111829) Add the `warnExceptions` capability to aggregators. Same as for evaluators. This requires: - Having a "Warnings-like" object in the compute module. I created a new class for that, temporarily - Adding new "FallibleState" objects, that hold a "failed" boolean/bitset to mark failed groups - Catching exceptions in all combine methods receiving a single group (Not catching in evaluateFinal/Intermediate, nor in combines that receive the full state) - Short-circuiting operations if the group is in "failed" state - Having a third intermediate state: `values, seen, failed` Extracted from https://github.com/elastic/elasticsearch/pull/111639.
Check it to see a use case --- .../elasticsearch/compute/ann/Aggregator.java | 6 + .../compute/ann/GroupingAggregator.java | 6 + x-pack/plugin/esql/compute/build.gradle | 52 ++++++ ...AggregatorFunctionSupplierImplementer.java | 56 ++++-- .../compute/gen/AggregatorImplementer.java | 159 ++++++++++++----- .../compute/gen/AggregatorProcessor.java | 17 +- .../compute/gen/Annotations.java | 45 +++++ .../compute/gen/EvaluatorProcessor.java | 35 +--- .../gen/GroupingAggregatorImplementer.java | 132 ++++++++++---- .../org/elasticsearch/compute/gen/Types.java | 7 + .../BooleanFallibleArrayState.java | 125 +++++++++++++ .../aggregation/BooleanFallibleState.java | 62 +++++++ .../compute/aggregation/BooleanState.java | 4 - .../aggregation/DoubleFallibleArrayState.java | 124 +++++++++++++ .../aggregation/DoubleFallibleState.java | 62 +++++++ .../compute/aggregation/DoubleState.java | 4 - .../aggregation/FloatFallibleArrayState.java | 124 +++++++++++++ .../aggregation/FloatFallibleState.java | 62 +++++++ .../compute/aggregation/FloatState.java | 4 - .../aggregation/IntFallibleArrayState.java | 124 +++++++++++++ .../compute/aggregation/IntFallibleState.java | 62 +++++++ .../compute/aggregation/IntState.java | 4 - .../aggregation/LongFallibleArrayState.java | 130 ++++++++++++++ .../aggregation/LongFallibleState.java | 62 +++++++ .../compute/aggregation/LongState.java | 4 - .../aggregation/AbstractArrayState.java | 7 + .../AbstractFallibleArrayState.java | 48 +++++ .../aggregation/CountAggregatorFunction.java | 2 +- .../compute/aggregation/Warnings.java | 74 ++++++++ .../aggregation/X-FallibleArrayState.java.st | 166 ++++++++++++++++++ .../aggregation/X-FallibleState.java.st | 62 +++++++ .../compute/aggregation/X-State.java.st | 8 - 32 files changed, 1690 insertions(+), 149 deletions(-) create mode 100644 x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java create mode 100644 
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java index 69db6a1310c9e..444dbcc1b9e58 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Aggregator.java @@ -57,4 +57,10 @@ IntermediateState[] value() default {}; + /** + * Exceptions thrown by the `combine*(...)` methods to catch and convert + * into a warning and turn into a null value. + */ + Class[] warnExceptions() default {}; + } diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java index 0216ea07e5c7c..8d81b60e20e4d 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/GroupingAggregator.java @@ -22,6 +22,12 @@ IntermediateState[] value() default {}; + /** + * Exceptions thrown by the `combine*(...)` methods to catch and convert + * into a warning and turn into a null value. + */ + Class[] warnExceptions() default {}; + /** * If {@code true} then the @timestamp LongVector will be appended to the input blocks of the aggregation function. */ diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index ccf93a277a50d..971bfd39c231f 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -446,6 +446,32 @@ tasks.named('stringTemplates').configure { it.inputFile = stateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/DoubleState.java" } + File fallibleStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st") + template { + it.properties = booleanProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanFallibleState.java" + } + template { + it.properties = intProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IntFallibleState.java" + } + template { + it.properties = longProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/LongFallibleState.java" + } + template { + it.properties = floatProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatFallibleState.java" + } + template { + it.properties = doubleProperties + it.inputFile = fallibleStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/DoubleFallibleState.java" + } // block lookups File lookupInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st") template { @@ -504,6 +530,32 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayStateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/FloatArrayState.java" } + File fallibleArrayStateInputFile = new 
File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st") + template { + it.properties = booleanProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java" + } + template { + it.properties = intProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IntFallibleArrayState.java" + } + template { + it.properties = longProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/LongFallibleArrayState.java" + } + template { + it.properties = doubleProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java" + } + template { + it.properties = floatProperties + it.inputFile = fallibleArrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java" + } File valuesAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java index 3f031db2978f9..f11ccbced6fbe 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java @@ -10,6 +10,7 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import org.elasticsearch.compute.ann.Aggregator; @@ -31,6 +32,7 @@ import static org.elasticsearch.compute.gen.Types.AGGREGATOR_FUNCTION_SUPPLIER; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.LIST_INTEGER; +import static org.elasticsearch.compute.gen.Types.STRING; /** * Implements "AggregationFunctionSupplier" from a class annotated with both @@ -40,6 +42,7 @@ public class AggregatorFunctionSupplierImplementer { private final TypeElement declarationType; private final AggregatorImplementer aggregatorImplementer; private final GroupingAggregatorImplementer groupingAggregatorImplementer; + private final boolean hasWarnings; private final List createParameters; private final ClassName implementation; @@ -47,11 +50,13 @@ public AggregatorFunctionSupplierImplementer( Elements elements, TypeElement declarationType, AggregatorImplementer aggregatorImplementer, - GroupingAggregatorImplementer groupingAggregatorImplementer + GroupingAggregatorImplementer groupingAggregatorImplementer, + boolean hasWarnings ) { this.declarationType = declarationType; this.aggregatorImplementer = aggregatorImplementer; this.groupingAggregatorImplementer = groupingAggregatorImplementer; + this.hasWarnings = hasWarnings; Set createParameters = new LinkedHashSet<>(); if (aggregatorImplementer != null) { @@ -86,6 +91,11 @@ private TypeSpec type() { builder.addModifiers(Modifier.PUBLIC, Modifier.FINAL); builder.addSuperinterface(AGGREGATOR_FUNCTION_SUPPLIER); + if (hasWarnings) { + builder.addField(TypeName.INT, "warningsLineNumber"); 
+ builder.addField(TypeName.INT, "warningsColumnNumber"); + builder.addField(STRING, "warningsSourceText"); + } createParameters.stream().forEach(p -> p.declareField(builder)); builder.addMethod(ctor()); if (aggregatorImplementer != null) { @@ -100,6 +110,14 @@ private TypeSpec type() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (hasWarnings) { + builder.addParameter(TypeName.INT, "warningsLineNumber"); + builder.addParameter(TypeName.INT, "warningsColumnNumber"); + builder.addParameter(STRING, "warningsSourceText"); + builder.addStatement("this.warningsLineNumber = warningsLineNumber"); + builder.addStatement("this.warningsColumnNumber = warningsColumnNumber"); + builder.addStatement("this.warningsSourceText = warningsSourceText"); + } createParameters.stream().forEach(p -> p.buildCtor(builder)); return builder.build(); } @@ -114,30 +132,48 @@ private MethodSpec unsupportedNonGroupingAggregator() { } private MethodSpec aggregator() { - MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator") - .addParameter(DRIVER_CONTEXT, "driverContext") - .returns(aggregatorImplementer.implementation()); + MethodSpec.Builder builder = MethodSpec.methodBuilder("aggregator"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addParameter(DRIVER_CONTEXT, "driverContext"); + builder.returns(aggregatorImplementer.implementation()); + + if (hasWarnings) { + builder.addStatement( + "var warnings = Warnings.createWarnings(driverContext.warningsMode(), " + + "warningsLineNumber, warningsColumnNumber, warningsSourceText)" + ); + } + builder.addStatement( "return $T.create($L)", aggregatorImplementer.implementation(), - Stream.concat(Stream.of("driverContext, channels"), aggregatorImplementer.createParameters().stream().map(Parameter::name)) - .collect(Collectors.joining(", ")) + Stream.concat( + Stream.concat(hasWarnings ? Stream.of("warnings") : Stream.of(), Stream.of("driverContext, channels")), + aggregatorImplementer.createParameters().stream().map(Parameter::name) + ).collect(Collectors.joining(", ")) ); return builder.build(); } private MethodSpec groupingAggregator() { - MethodSpec.Builder builder = MethodSpec.methodBuilder("groupingAggregator") - .addParameter(DRIVER_CONTEXT, "driverContext") - .returns(groupingAggregatorImplementer.implementation()); + MethodSpec.Builder builder = MethodSpec.methodBuilder("groupingAggregator"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addParameter(DRIVER_CONTEXT, "driverContext"); + builder.returns(groupingAggregatorImplementer.implementation()); + + if (hasWarnings) { + builder.addStatement( + "var warnings = Warnings.createWarnings(driverContext.warningsMode(), " + + "warningsLineNumber, warningsColumnNumber, warningsSourceText)" + ); + } + builder.addStatement( "return $T.create($L)", groupingAggregatorImplementer.implementation(), Stream.concat( - Stream.of("channels, driverContext"), + Stream.concat(hasWarnings ? 
Stream.of("warnings") : Stream.of(), Stream.of("channels, driverContext")), groupingAggregatorImplementer.createParameters().stream().map(Parameter::name) ).collect(Collectors.joining(", ")) ); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 914724905541d..67ce0cf709704 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -21,10 +21,15 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Objects; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -40,6 +45,7 @@ import static org.elasticsearch.compute.gen.Types.BYTES_REF; import static org.elasticsearch.compute.gen.Types.BYTES_REF_BLOCK; import static org.elasticsearch.compute.gen.Types.BYTES_REF_VECTOR; +import static org.elasticsearch.compute.gen.Types.COMPUTE_WARNINGS; import static org.elasticsearch.compute.gen.Types.DOUBLE_BLOCK; import static org.elasticsearch.compute.gen.Types.DOUBLE_VECTOR; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; @@ -68,6 +74,7 @@ */ public class AggregatorImplementer { private final TypeElement declarationType; + private final List warnExceptions; private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineValueCount; @@ -76,18 +83,28 @@ public class AggregatorImplementer { private final ClassName implementation; private final TypeName stateType; private final boolean stateTypeHasSeen; + private final boolean stateTypeHasFailed; private final boolean valuesIsBytesRef; private final List intermediateState; private final List createParameters; - public AggregatorImplementer(Elements elements, TypeElement declarationType, IntermediateState[] interStateAnno) { + public AggregatorImplementer( + Elements elements, + TypeElement declarationType, + IntermediateState[] interStateAnno, + List warnExceptions + ) { this.declarationType = declarationType; + this.warnExceptions = warnExceptions; this.init = findRequiredMethod(declarationType, new String[] { "init", "initSingle" }, e -> true); this.stateType = choseStateType(); - stateTypeHasSeen = elements.getAllMembers(elements.getTypeElement(stateType.toString())) + this.stateTypeHasSeen = elements.getAllMembers(elements.getTypeElement(stateType.toString())) .stream() .anyMatch(e -> e.toString().equals("seen()")); + this.stateTypeHasFailed = elements.getAllMembers(elements.getTypeElement(stateType.toString())) + .stream() + .anyMatch(e -> e.toString().equals("failed()")); this.combine = findRequiredMethod(declarationType, new String[] { "combine" }, e -> { if (e.getParameters().size() == 0) { @@ -126,7 +143,10 @@ private TypeName choseStateType() { if (false == initReturn.isPrimitive()) { return initReturn; } - return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "State"); + if (warnExceptions.isEmpty()) { + return 
ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "State"); + } + return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "FallibleState"); } static String valueType(ExecutableElement init, ExecutableElement combine) { @@ -202,6 +222,11 @@ private TypeSpec type() { .initializer(initInterState()) .build() ); + + if (warnExceptions.isEmpty() == false) { + builder.addField(COMPUTE_WARNINGS, "warnings", Modifier.PRIVATE, Modifier.FINAL); + } + builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); builder.addField(stateType, "state", Modifier.PRIVATE, Modifier.FINAL); builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); @@ -228,17 +253,26 @@ private TypeSpec type() { private MethodSpec create() { MethodSpec.Builder builder = MethodSpec.methodBuilder("create"); builder.addModifiers(Modifier.PUBLIC, Modifier.STATIC).returns(implementation); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(DRIVER_CONTEXT, "driverContext"); builder.addParameter(LIST_INTEGER, "channels"); for (Parameter p : createParameters) { builder.addParameter(p.type(), p.name()); } if (createParameters.isEmpty()) { - builder.addStatement("return new $T(driverContext, channels, $L)", implementation, callInit()); + builder.addStatement( + "return new $T($LdriverContext, channels, $L)", + implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", + callInit() + ); } else { builder.addStatement( - "return new $T(driverContext, channels, $L, $L)", + "return new $T($LdriverContext, channels, $L, $L)", implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", callInit(), createParameters.stream().map(p -> p.name()).collect(joining(", ")) ); @@ -275,16 +309,22 @@ private CodeBlock initInterState() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(DRIVER_CONTEXT, "driverContext"); builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(stateType, "state"); + + if (warnExceptions.isEmpty() == false) { + builder.addStatement("this.warnings = warnings"); + } builder.addStatement("this.driverContext = driverContext"); builder.addStatement("this.channels = channels"); builder.addStatement("this.state = state"); for (Parameter p : createParameters()) { - builder.addParameter(p.type(), p.name()); - builder.addStatement("this.$N = $N", p.name(), p.name()); + p.buildCtor(builder); } return builder.build(); } @@ -306,6 +346,11 @@ private MethodSpec intermediateBlockCount() { private MethodSpec addRawInput() { MethodSpec.Builder builder = MethodSpec.methodBuilder("addRawInput"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).addParameter(PAGE, "page"); + if (stateTypeHasFailed) { + builder.beginControlFlow("if (state.failed())"); + builder.addStatement("return"); + builder.endControlFlow(); + } builder.addStatement("$T block = page.getBlock(channels.get(0))", valueBlockType(init, combine)); builder.addStatement("$T vector = block.asVector()", valueVectorType(init, combine)); builder.beginControlFlow("if (vector != null)").addStatement("addRawVector(vector)"); @@ -366,20 +411,27 @@ private MethodSpec addRawBlock() { } private void combineRawInput(MethodSpec.Builder builder, String blockVariable) { + 
TypeName returnType = TypeName.get(combine.getReturnType()); + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } if (valuesIsBytesRef) { combineRawInputForBytesRef(builder, blockVariable); - return; - } - TypeName returnType = TypeName.get(combine.getReturnType()); - if (returnType.isPrimitive()) { + } else if (returnType.isPrimitive()) { combineRawInputForPrimitive(returnType, builder, blockVariable); - return; - } - if (returnType == TypeName.VOID) { + } else if (returnType == TypeName.VOID) { combineRawInputForVoid(builder, blockVariable); - return; + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.failed(true)"); + builder.addStatement("return"); + builder.endControlFlow(); } - throw new IllegalArgumentException("combine must return void or a primitive"); } private void combineRawInputForPrimitive(TypeName returnType, MethodSpec.Builder builder, String blockVariable) { @@ -423,16 +475,37 @@ private MethodSpec addIntermediateInput() { } builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); } else if (hasPrimitiveState()) { - assert intermediateState.size() == 2; - assert intermediateState.get(1).name().equals("seen"); - builder.beginControlFlow("if (seen.getBoolean(0))"); - { - var state = intermediateState.get(0); - var s = "state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; - builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); - builder.addStatement("state.seen(true)"); + if (warnExceptions.isEmpty()) { + assert intermediateState.size() == 2; + assert intermediateState.get(1).name().equals("seen"); + builder.beginControlFlow("if (seen.getBoolean(0))"); + } else { + assert intermediateState.size() == 3; + assert intermediateState.get(1).name().equals("seen"); + assert intermediateState.get(2).name().equals("failed"); + builder.beginControlFlow("if (failed.getBoolean(0))"); + { + builder.addStatement("state.failed(true)"); + builder.addStatement("state.seen(true)"); + } + builder.nextControlFlow("else if (seen.getBoolean(0))"); + } + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } + var state = intermediateState.get(0); + var s = "state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; + builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); + builder.addStatement("state.seen(true)"); + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.failed(true)"); builder.endControlFlow(); } + builder.endControlFlow(); } else { throw new IllegalArgumentException("Don't know how to combine intermediate input. 
Define combineIntermediate"); } @@ -445,15 +518,15 @@ String intermediateStateRowAccess() { private String primitiveStateMethod() { switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState": + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.BooleanFallibleState": return "booleanValue"; - case "org.elasticsearch.compute.aggregation.IntState": + case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.IntFallibleState": return "intValue"; - case "org.elasticsearch.compute.aggregation.LongState": + case "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.LongFallibleState": return "longValue"; - case "org.elasticsearch.compute.aggregation.DoubleState": + case "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.DoubleFallibleState": return "doubleValue"; - case "org.elasticsearch.compute.aggregation.FloatState": + case "org.elasticsearch.compute.aggregation.FloatState", "org.elasticsearch.compute.aggregation.FloatFallibleState": return "floatValue"; default: throw new IllegalArgumentException( @@ -480,8 +553,11 @@ private MethodSpec evaluateFinal() { .addParameter(BLOCK_ARRAY, "blocks") .addParameter(TypeName.INT, "offset") .addParameter(DRIVER_CONTEXT, "driverContext"); - if (stateTypeHasSeen) { - builder.beginControlFlow("if (state.seen() == false)"); + if (stateTypeHasSeen || stateTypeHasFailed) { + var condition = Stream.of(stateTypeHasSeen ? "state.seen() == false" : null, stateTypeHasFailed ? "state.failed()" : null) + .filter(Objects::nonNull) + .collect(joining(" || ")); + builder.beginControlFlow("if ($L)", condition); builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1)", BLOCK); builder.addStatement("return"); builder.endControlFlow(); @@ -496,19 +572,19 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState": + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.BooleanFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.IntState": + case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.IntFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.LongState": + case "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.LongFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.DoubleState": + case "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.DoubleFallibleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1)"); return; - case "org.elasticsearch.compute.aggregation.FloatState": + case "org.elasticsearch.compute.aggregation.FloatState", "org.elasticsearch.compute.aggregation.FloatFallibleState": builder.addStatement("blocks[offset] = 
driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1)"); return; default: @@ -534,13 +610,12 @@ private MethodSpec close() { return builder.build(); } + private static final Pattern PRIMITIVE_STATE_PATTERN = Pattern.compile( + "org.elasticsearch.compute.aggregation.(Boolean|Int|Long|Double|Float)(Fallible)?State" + ); + private boolean hasPrimitiveState() { - return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.IntState", - "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.DoubleState", - "org.elasticsearch.compute.aggregation.FloatState" -> true; - default -> false; - }; + return PRIMITIVE_STATE_PATTERN.matcher(stateType.toString()).matches(); } record IntermediateStateDesc(String name, String elementType, boolean block) { diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java index d07b24047b7e2..4b1f946a1d176 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorProcessor.java @@ -80,9 +80,14 @@ public boolean process(Set set, RoundEnvironment roundEnv } for (TypeElement aggClass : annotatedClasses) { AggregatorImplementer implementer = null; + var warnExceptionsTypes = Annotations.listAttributeValues( + aggClass, + Set.of(Aggregator.class, GroupingAggregator.class), + "warnExceptions" + ); if (aggClass.getAnnotation(Aggregator.class) != null) { IntermediateState[] intermediateState = aggClass.getAnnotation(Aggregator.class).value(); - implementer = new AggregatorImplementer(env.getElementUtils(), aggClass, intermediateState); + implementer = new AggregatorImplementer(env.getElementUtils(), aggClass, intermediateState, warnExceptionsTypes); write(aggClass, "aggregator", implementer.sourceFile(), env); } GroupingAggregatorImplementer groupingAggregatorImplementer = null; @@ -96,6 +101,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getElementUtils(), aggClass, intermediateState, + warnExceptionsTypes, includeTimestamps ); write(aggClass, "grouping aggregator", groupingAggregatorImplementer.sourceFile(), env); @@ -104,8 +110,13 @@ public boolean process(Set set, RoundEnvironment roundEnv write( aggClass, "aggregator function supplier", - new AggregatorFunctionSupplierImplementer(env.getElementUtils(), aggClass, implementer, groupingAggregatorImplementer) - .sourceFile(), + new AggregatorFunctionSupplierImplementer( + env.getElementUtils(), + aggClass, + implementer, + groupingAggregatorImplementer, + warnExceptionsTypes.isEmpty() == false + ).sourceFile(), env ); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java new file mode 100644 index 0000000000000..d3892f7d2a40b --- /dev/null +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Annotations.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.gen; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import javax.lang.model.element.AnnotationValue; +import javax.lang.model.element.Element; +import javax.lang.model.type.TypeMirror; + +public class Annotations { + private Annotations() {} + + /** + * Returns the values of the requested attribute, from all the matching annotations on the given element. + * + * @param element the element to inspect + * @param annotations the annotations to look for + * @param attributeName the attribute to extract + */ + public static List<TypeMirror> listAttributeValues(Element element, Set<Class<?>> annotations, String attributeName) { + List<TypeMirror> result = new ArrayList<>(); + for (var mirror : element.getAnnotationMirrors()) { + String annotationType = mirror.getAnnotationType().toString(); + if (annotations.stream().anyMatch(a -> a.getName().equals(annotationType))) { + for (var e : mirror.getElementValues().entrySet()) { + if (false == e.getKey().getSimpleName().toString().equals(attributeName)) { + continue; + } + for (var v : (List<?>) e.getValue().getValue()) { + result.add((TypeMirror) ((AnnotationValue) v).getValue()); + } + } + } + } + return result; + } +} diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java index ea3ee938298de..09012c7b3a48a 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java @@ -11,7 +11,6 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.MvEvaluator; -import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -21,11 +20,9 @@ import javax.annotation.processing.RoundEnvironment; import javax.lang.model.SourceVersion; import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; import javax.lang.model.element.Element; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.TypeElement; -import javax.lang.model.type.TypeMirror; import javax.tools.Diagnostic; /** @@ -69,6 +66,11 @@ public Iterable getCompletions( public boolean process(Set set, RoundEnvironment roundEnvironment) { for (TypeElement ann : set) { for (Element evaluatorMethod : roundEnvironment.getElementsAnnotatedWith(ann)) { + var warnExceptionsTypes = Annotations.listAttributeValues( + evaluatorMethod, + Set.of(Evaluator.class, MvEvaluator.class, ConvertEvaluator.class), + "warnExceptions" + ); Evaluator evaluatorAnn = evaluatorMethod.getAnnotation(Evaluator.class); if (evaluatorAnn != null) { try { @@ -80,7 +82,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getTypeUtils(), (ExecutableElement) evaluatorMethod, evaluatorAnn.extraName(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -102,7 +104,7 @@ public boolean process(Set set, RoundEnvironment roundEnv mvEvaluatorAnn.finish(), mvEvaluatorAnn.single(), mvEvaluatorAnn.ascending(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -121,7 +123,7 @@ public boolean process(Set set, RoundEnvironment roundEnv env.getElementUtils(), (ExecutableElement) evaluatorMethod, convertEvaluatorAnn.extraName(), - warnExceptions(evaluatorMethod) + warnExceptionsTypes ).sourceFile(), env ); @@ -134,25 +136,4 @@
public boolean process(Set set, RoundEnvironment roundEnv } return true; } - - private static List warnExceptions(Element evaluatorMethod) { - List result = new ArrayList<>(); - for (var mirror : evaluatorMethod.getAnnotationMirrors()) { - String annotationType = mirror.getAnnotationType().toString(); - if (annotationType.equals(Evaluator.class.getName()) - || annotationType.equals(MvEvaluator.class.getName()) - || annotationType.equals(ConvertEvaluator.class.getName())) { - - for (var e : mirror.getElementValues().entrySet()) { - if (false == e.getKey().getSimpleName().toString().equals("warnExceptions")) { - continue; - } - for (var v : (List) e.getValue().getValue()) { - result.add((TypeMirror) ((AnnotationValue) v).getValue()); - } - } - } - } - return result; - } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 79df41f304c06..0c4aeca996a19 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -22,11 +22,13 @@ import java.util.List; import java.util.Locale; import java.util.function.Consumer; +import java.util.regex.Pattern; import java.util.stream.Collectors; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -38,6 +40,7 @@ import static org.elasticsearch.compute.gen.Types.BIG_ARRAYS; import static org.elasticsearch.compute.gen.Types.BLOCK_ARRAY; import static org.elasticsearch.compute.gen.Types.BYTES_REF; +import static org.elasticsearch.compute.gen.Types.COMPUTE_WARNINGS; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.ELEMENT_TYPE; import static org.elasticsearch.compute.gen.Types.GROUPING_AGGREGATOR_FUNCTION; @@ -63,6 +66,7 @@ */ public class GroupingAggregatorImplementer { private final TypeElement declarationType; + private final List warnExceptions; private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineStates; @@ -79,9 +83,11 @@ public GroupingAggregatorImplementer( Elements elements, TypeElement declarationType, IntermediateState[] interStateAnno, + List warnExceptions, boolean includeTimestampVector ) { this.declarationType = declarationType; + this.warnExceptions = warnExceptions; this.init = findRequiredMethod(declarationType, new String[] { "init", "initGrouping" }, e -> true); this.stateType = choseStateType(); @@ -129,7 +135,10 @@ private TypeName choseStateType() { } String head = initReturn.toString().substring(0, 1).toUpperCase(Locale.ROOT); String tail = initReturn.toString().substring(1); - return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "ArrayState"); + if (warnExceptions.isEmpty()) { + return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "ArrayState"); + } + return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "FallibleArrayState"); } public JavaFile sourceFile() { @@ -154,6 +163,9 @@ private TypeSpec type() { .build() ); builder.addField(stateType, "state", 
Modifier.PRIVATE, Modifier.FINAL); + if (warnExceptions.isEmpty() == false) { + builder.addField(COMPUTE_WARNINGS, "warnings", Modifier.PRIVATE, Modifier.FINAL); + } builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); @@ -182,17 +194,26 @@ private TypeSpec type() { private MethodSpec create() { MethodSpec.Builder builder = MethodSpec.methodBuilder("create"); builder.addModifiers(Modifier.PUBLIC, Modifier.STATIC).returns(implementation); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(DRIVER_CONTEXT, "driverContext"); for (Parameter p : createParameters) { builder.addParameter(p.type(), p.name()); } if (createParameters.isEmpty()) { - builder.addStatement("return new $T(channels, $L, driverContext)", implementation, callInit()); + builder.addStatement( + "return new $T($Lchannels, $L, driverContext)", + implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", + callInit() + ); } else { builder.addStatement( - "return new $T(channels, $L, driverContext, $L)", + "return new $T($Lchannels, $L, driverContext, $L)", implementation, + warnExceptions.isEmpty() ? "" : "warnings, ", callInit(), createParameters.stream().map(p -> p.name()).collect(joining(", ")) ); @@ -235,9 +256,15 @@ private CodeBlock initInterState() { private MethodSpec ctor() { MethodSpec.Builder builder = MethodSpec.constructorBuilder().addModifiers(Modifier.PUBLIC); + if (warnExceptions.isEmpty() == false) { + builder.addParameter(COMPUTE_WARNINGS, "warnings"); + } builder.addParameter(LIST_INTEGER, "channels"); builder.addParameter(stateType, "state"); builder.addParameter(DRIVER_CONTEXT, "driverContext"); + if (warnExceptions.isEmpty() == false) { + builder.addStatement("this.warnings = warnings"); + } builder.addStatement("this.channels = channels"); builder.addStatement("this.state = state"); builder.addStatement("this.driverContext = driverContext"); @@ -349,6 +376,12 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); } + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("if (state.hasFailed(groupId))"); + builder.addStatement("continue"); + builder.endControlFlow(); + } + if (valuesIsBlock) { builder.beginControlFlow("if (values.isNull(groupPosition + positionOffset))"); builder.addStatement("continue"); @@ -371,31 +404,35 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { } private void combineRawInput(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { - if (valuesIsBytesRef) { - combineRawInputForBytesRef(builder, blockVariable, offsetVariable); - return; - } - if (includeTimestampVector) { - combineRawInputWithTimestamp(builder, offsetVariable); - return; - } TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); - if (valueType.isPrimitive() == false) { - throw new IllegalArgumentException("second parameter to combine must be a primitive"); - } String secondParameterGetter = "get" + valueType.toString().substring(0, 1).toUpperCase(Locale.ROOT) + valueType.toString().substring(1); TypeName returnType = TypeName.get(combine.getReturnType()); - if (returnType.isPrimitive()) { - combineRawInputForPrimitive(builder, secondParameterGetter, 
blockVariable, offsetVariable); - return; + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); } - if (returnType == TypeName.VOID) { + if (valuesIsBytesRef) { + combineRawInputForBytesRef(builder, blockVariable, offsetVariable); + } else if (includeTimestampVector) { + combineRawInputWithTimestamp(builder, offsetVariable); + } else if (valueType.isPrimitive() == false) { + throw new IllegalArgumentException("second parameter to combine must be a primitive"); + } else if (returnType.isPrimitive()) { + combineRawInputForPrimitive(builder, secondParameterGetter, blockVariable, offsetVariable); + } else if (returnType == TypeName.VOID) { combineRawInputForVoid(builder, secondParameterGetter, blockVariable, offsetVariable); - return; + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.setFailed(groupId)"); + builder.endControlFlow(); } - throw new IllegalArgumentException("combine must return void or a primitive"); } private void combineRawInputForPrimitive( @@ -481,20 +518,40 @@ private MethodSpec addIntermediateInput() { { builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); if (hasPrimitiveState()) { - assert intermediateState.size() == 2; - assert intermediateState.get(1).name().equals("seen"); - builder.beginControlFlow("if (seen.getBoolean(groupPosition + positionOffset))"); - { - var name = intermediateState.get(0).name(); - var m = vectorAccessorName(intermediateState.get(0).elementType()); - builder.addStatement( - "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", - declarationType, - name, - m - ); + if (warnExceptions.isEmpty()) { + assert intermediateState.size() == 2; + assert intermediateState.get(1).name().equals("seen"); + builder.beginControlFlow("if (seen.getBoolean(groupPosition + positionOffset))"); + } else { + assert intermediateState.size() == 3; + assert intermediateState.get(1).name().equals("seen"); + assert intermediateState.get(2).name().equals("failed"); + builder.beginControlFlow("if (failed.getBoolean(groupPosition + positionOffset))"); + { + builder.addStatement("state.setFailed(groupId)"); + } + builder.nextControlFlow("else if (seen.getBoolean(groupPosition + positionOffset))"); + } + + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } + var name = intermediateState.get(0).name(); + var vectorAccessor = vectorAccessorName(intermediateState.get(0).elementType()); + builder.addStatement( + "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", + declarationType, + name, + vectorAccessor + ); + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.setFailed(groupId)"); builder.endControlFlow(); } + builder.endControlFlow(); } else { builder.addStatement("$T.combineIntermediate(state, groupId, " + 
intermediateStateRowAccess() + ")", declarationType); } @@ -582,12 +639,11 @@ private MethodSpec close() { return builder.build(); } + private static final Pattern PRIMITIVE_STATE_PATTERN = Pattern.compile( + "org.elasticsearch.compute.aggregation.(Boolean|Int|Long|Double|Float)(Fallible)?ArrayState" + ); + private boolean hasPrimitiveState() { - return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.BooleanArrayState", "org.elasticsearch.compute.aggregation.IntArrayState", - "org.elasticsearch.compute.aggregation.LongArrayState", "org.elasticsearch.compute.aggregation.DoubleArrayState", - "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; - default -> false; - }; + return PRIMITIVE_STATE_PATTERN.matcher(stateType.toString()).matches(); } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java index 2b42adc67d71a..096d0b86e6cff 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java @@ -27,6 +27,8 @@ public class Types { private static final String OPERATOR_PACKAGE = PACKAGE + ".operator"; private static final String DATA_PACKAGE = PACKAGE + ".data"; + static final TypeName STRING = ClassName.get("java.lang", "String"); + static final TypeName LIST_INTEGER = ParameterizedTypeName.get(ClassName.get(List.class), TypeName.INT.box()); static final ClassName PAGE = ClassName.get(DATA_PACKAGE, "Page"); @@ -128,6 +130,11 @@ public class Types { ); static final ClassName WARNINGS = ClassName.get("org.elasticsearch.xpack.esql.expression.function", "Warnings"); + /** + * Warnings class used in compute module. + * It uses no external dependencies (Like Warnings and Source). + */ + static final ClassName COMPUTE_WARNINGS = ClassName.get("org.elasticsearch.compute.aggregation", "Warnings"); static final ClassName SOURCE = ClassName.get("org.elasticsearch.xpack.esql.core.tree", "Source"); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java new file mode 100644 index 0000000000000..6367fdfb6617e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleArrayState.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of booleans, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *

    + * This class is generated. Do not edit it. + *
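 + * As a rough sketch (an illustration only, not part of the generated file), the
 + * generated grouping combine loop drives this state like:
 + *   if (state.hasFailed(groupId)) continue;
 + *   try {
 + *     state.set(groupId, MyAggregator.combine(state.getOrDefault(groupId), value));
 + *   } catch (SomeWarnedException e) {
 + *     warnings.registerException(e);
 + *     state.setFailed(groupId);
 + *   }
 + * where MyAggregator and SomeWarnedException are placeholder names; once a group
 + * has failed it is skipped from then on, and toValuesBlock renders it as null.
 + *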

    + */ +final class BooleanFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final boolean init; + + private BitArray values; + private int size; + + BooleanFallibleArrayState(BigArrays bigArrays, boolean init) { + super(bigArrays); + this.values = new BitArray(1, bigArrays); + this.size = 1; + this.values.set(0, init); + this.init = init; + } + + boolean get(int groupId) { + return values.get(groupId); + } + + boolean getOrDefault(int groupId) { + return groupId < size ? values.get(groupId) : init; + } + + void set(int groupId, boolean value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendBoolean(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendBoolean(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < size) { + valuesBuilder.appendBoolean(values.get(group)); + } else { + valuesBuilder.appendBoolean(false); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java new file mode 100644 index 0000000000000..073f31c390a6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single boolean. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class BooleanFallibleState implements AggregatorState { + private boolean value; + private boolean seen; + private boolean failed; + + BooleanFallibleState(boolean init) { + this.value = init; + } + + boolean booleanValue() { + return value; + } + + void booleanValue(boolean value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantBooleanBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java index 7d225c7c06a72..ba4d133dee553 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java @@ -18,10 +18,6 @@ final class BooleanState implements AggregatorState { private boolean value; private boolean seen; - BooleanState() { - this(false); - } - BooleanState(boolean init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java new file mode 100644 index 0000000000000..dd1d60f7bd246 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of doubles, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *

    + * This class is generated. Do not edit it. + *

    + */ +final class DoubleFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final double init; + + private DoubleArray values; + + DoubleFallibleArrayState(BigArrays bigArrays, double init) { + super(bigArrays); + this.values = bigArrays.newDoubleArray(1, false); + this.values.set(0, init); + this.init = init; + } + + double get(int groupId) { + return values.get(groupId); + } + + double getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, double value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendDouble(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendDouble(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendDouble(values.get(group)); + } else { + valuesBuilder.appendDouble(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java new file mode 100644 index 0000000000000..4cdeddec724bf --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single double. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class DoubleFallibleState implements AggregatorState { + private double value; + private boolean seen; + private boolean failed; + + DoubleFallibleState(double init) { + this.value = init; + } + + double doubleValue() { + return value; + } + + void doubleValue(double value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantDoubleBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java index f1c92c685bcab..90ecc2c1d3c03 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleState.java @@ -18,10 +18,6 @@ final class DoubleState implements AggregatorState { private double value; private boolean seen; - DoubleState() { - this(0); - } - DoubleState(double init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java new file mode 100644 index 0000000000000..055cf345033c5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.FloatArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of floats, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is generated. Do not edit it. + *
    + */ +final class FloatFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final float init; + + private FloatArray values; + + FloatFallibleArrayState(BigArrays bigArrays, float init) { + super(bigArrays); + this.values = bigArrays.newFloatArray(1, false); + this.values.set(0, init); + this.init = init; + } + + float get(int groupId) { + return values.get(groupId); + } + + float getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, float value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newFloatVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendFloat(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (FloatBlock.Builder builder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendFloat(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendFloat(values.get(group)); + } else { + valuesBuilder.appendFloat(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java new file mode 100644 index 0000000000000..b050c86258dcd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single float. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class FloatFallibleState implements AggregatorState { + private float value; + private boolean seen; + private boolean failed; + + FloatFallibleState(float init) { + this.value = init; + } + + float floatValue() { + return value; + } + + void floatValue(float value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantFloatBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java index 81bdd39e51b6e..6f608271b6e42 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java @@ -18,10 +18,6 @@ final class FloatState implements AggregatorState { private float value; private boolean seen; - FloatState() { - this(0); - } - FloatState(float init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java new file mode 100644 index 0000000000000..e45d84720ca1a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleArrayState.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of ints, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is generated. Do not edit it. + *
    + */ +final class IntFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final int init; + + private IntArray values; + + IntFallibleArrayState(BigArrays bigArrays, int init) { + super(bigArrays); + this.values = bigArrays.newIntArray(1, false); + this.values.set(0, init); + this.init = init; + } + + int get(int groupId) { + return values.get(groupId); + } + + int getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, int value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newIntVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendInt(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendInt(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendInt(values.get(group)); + } else { + valuesBuilder.appendInt(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java new file mode 100644 index 0000000000000..360f3fdb009e4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single int. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class IntFallibleState implements AggregatorState { + private int value; + private boolean seen; + private boolean failed; + + IntFallibleState(int init) { + this.value = init; + } + + int intValue() { + return value; + } + + void intValue(int value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantIntBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java index e7db40eccf9c8..c539c576ef36d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntState.java @@ -18,10 +18,6 @@ final class IntState implements AggregatorState { private int value; private boolean seen; - IntState() { - this(0); - } - IntState(int init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java new file mode 100644 index 0000000000000..cb69579906871 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleArrayState.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of longs, that also tracks failures. 
+ * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
+ * This class is generated. Do not edit it. + *
    + */ +final class LongFallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final long init; + + private LongArray values; + + LongFallibleArrayState(BigArrays bigArrays, long init) { + super(bigArrays); + this.values = bigArrays.newLongArray(1, false); + this.values.set(0, init); + this.init = init; + } + + long get(int groupId) { + return values.get(groupId); + } + + long getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, long value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + void increment(int groupId, long value) { + ensureCapacity(groupId); + values.increment(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendLong(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.appendLong(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendLong(values.get(group)); + } else { + valuesBuilder.appendLong(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java new file mode 100644 index 0000000000000..98669ef627d04 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongFallibleState.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single long. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class LongFallibleState implements AggregatorState { + private long value; + private boolean seen; + private boolean failed; + + LongFallibleState(long init) { + this.value = init; + } + + long longValue() { + return value; + } + + void longValue(long value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstantLongBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java index da78b649782d5..e9d97dcfe7fc1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongState.java @@ -18,10 +18,6 @@ final class LongState implements AggregatorState { private long value; private boolean seen; - LongState() { - this(0); - } - LongState(long init) { this.value = init; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java index 1573efdd81059..f9962922cc4a7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractArrayState.java @@ -12,6 +12,13 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +/** + * Base class for array states that track which group ids have been set. + * Most of this class subclasses are autogenerated. + *
    + */ public class AbstractArrayState implements Releasable { protected final BigArrays bigArrays; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java new file mode 100644 index 0000000000000..d5ad3189e2f9e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/AbstractFallibleArrayState.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.Releasables; + +/** + * Base class that extends {@link AbstractArrayState} to add failure tracking. + * That is, when a group id fails, it is marked as failed in the state. + *
+ * Most of this class's subclasses are autogenerated. + *
    + */ +public class AbstractFallibleArrayState extends AbstractArrayState { + private BitArray failed; + + public AbstractFallibleArrayState(BigArrays bigArrays) { + super(bigArrays); + } + + final boolean hasFailed(int groupId) { + return failed != null && failed.get(groupId); + } + + protected final boolean anyFailure() { + return failed != null; + } + + protected final void setFailed(int groupId) { + if (failed == null) { + failed = new BitArray(groupId + 1, bigArrays); + } + failed.set(groupId); + } + + @Override + public void close() { + super.close(); + Releasables.close(failed); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java index 13a4204edfd8f..c32f6f4703a79 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java @@ -52,7 +52,7 @@ public static List intermediateStateDesc() { private final boolean countAll; public static CountAggregatorFunction create(List inputChannels) { - return new CountAggregatorFunction(inputChannels, new LongState()); + return new CountAggregatorFunction(inputChannels, new LongState(0)); } private CountAggregatorFunction(List channels, LongState state) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java new file mode 100644 index 0000000000000..eb2255a4e349b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/Warnings.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.operator.DriverContext; + +import static org.elasticsearch.common.logging.HeaderWarning.addWarning; +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; + +/** + * Utilities to collect warnings for running an executor. + */ +public class Warnings { + static final int MAX_ADDED_WARNINGS = 20; + + private final String location; + private final String first; + + private int addedWarnings; + + public static final Warnings NOOP_WARNINGS = new Warnings(-1, -2, "") { + @Override + public void registerException(Exception exception) { + // this space intentionally left blank + } + }; + + /** + * Create a new warnings object based on the given mode + * @param warningsMode The warnings collection strategy to use + * @param lineNumber The line number of the source text. Same as `source.getLineNumber()` + * @param columnNumber The column number of the source text. Same as `source.getColumnNumber()` + * @param sourceText The source text that caused the warning. 
Same as `source.text()` + * @return A warnings collector object + */ + public static Warnings createWarnings(DriverContext.WarningsMode warningsMode, int lineNumber, int columnNumber, String sourceText) { + switch (warningsMode) { + case COLLECT -> { + return new Warnings(lineNumber, columnNumber, sourceText); + } + case IGNORE -> { + return NOOP_WARNINGS; + } + } + throw new IllegalStateException("Unreachable"); + } + + public Warnings(int lineNumber, int columnNumber, String sourceText) { + location = format("Line {}:{}: ", lineNumber, columnNumber); + first = format( + null, + "{}evaluation of [{}] failed, treating result as null. Only first {} failures recorded.", + location, + sourceText, + MAX_ADDED_WARNINGS + ); + } + + public void registerException(Exception exception) { + if (addedWarnings < MAX_ADDED_WARNINGS) { + if (addedWarnings == 0) { + addWarning(first); + } + // location needs to be added to the exception too, since the headers are deduplicated + addWarning(location + exception.getClass().getName() + ": " + exception.getMessage()); + addedWarnings++; + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st new file mode 100644 index 0000000000000..3c57ab948a79f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleArrayState.java.st @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +$if(boolean)$ +import org.elasticsearch.common.util.BitArray; +$else$ +import org.elasticsearch.common.util.$Type$Array; +$endif$ +import org.elasticsearch.compute.data.Block; +$if(long)$ +import org.elasticsearch.compute.data.IntVector; +$endif$ +import org.elasticsearch.compute.data.$Type$Block; +$if(int)$ +import org.elasticsearch.compute.data.$Type$Vector; +$endif$ +$if(boolean||double||float)$ +import org.elasticsearch.compute.data.IntVector; +$endif$ +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of $type$s, that also tracks failures. + * It is created in a mode where it won't track + * the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
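+ * <p>
+ * For example, a minimal illustrative sketch of the two modes (the
+ * {@code seenGroupIds} argument is assumed here for illustration, not
+ * taken from a real caller):
+ * <pre>{@code
+ *     state.set(0, value);                        // dense input: the caller tracks groups
+ *     state.enableGroupIdTracking(seenGroupIds);  // nulls appeared: switch modes
+ *     state.set(1, value);                        // the state now records group 1 itself
+ * }</pre>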
+ * This class is generated. Do not edit it. + *
    + */ +final class $Type$FallibleArrayState extends AbstractFallibleArrayState implements GroupingAggregatorState { + private final $type$ init; + +$if(boolean)$ + private BitArray values; + private int size; + +$else$ + private $Type$Array values; +$endif$ + + $Type$FallibleArrayState(BigArrays bigArrays, $type$ init) { + super(bigArrays); +$if(boolean)$ + this.values = new BitArray(1, bigArrays); + this.size = 1; +$else$ + this.values = bigArrays.new$Type$Array(1, false); +$endif$ + this.values.set(0, init); + this.init = init; + } + + $type$ get(int groupId) { + return values.get(groupId); + } + + $type$ getOrDefault(int groupId) { +$if(boolean)$ + return groupId < size ? values.get(groupId) : init; +$else$ + return groupId < values.size() ? values.get(groupId) : init; +$endif$ + } + + void set(int groupId, $type$ value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + +$if(long)$ + void increment(int groupId, long value) { + ensureCapacity(groupId); + values.increment(groupId, value); + trackGroupId(groupId); + } +$endif$ + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds() && false == anyFailure()) { + try (var builder = driverContext.blockFactory().new$Type$VectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.append$Type$(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try ($Type$Block.Builder builder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group) && !hasFailed(group)) { + builder.append$Type$(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { +$if(boolean)$ + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } +$else$ + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } +$endif$ + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 3; + try ( + var valuesBuilder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()); + var hasFailedBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < $if(boolean)$size$else$values.size()$endif$) { + valuesBuilder.append$Type$(values.get(group)); + } else { + valuesBuilder.append$Type$($if(boolean)$false$else$0$endif$); // TODO can we just use null? 
+ } + hasValueBuilder.appendBoolean(i, hasValue(group)); + hasFailedBuilder.appendBoolean(i, hasFailed(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + blocks[offset + 2] = hasFailedBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st new file mode 100644 index 0000000000000..27609383e4f61 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single $type$. + * It stores a third boolean to store if the aggregation failed. + * This class is generated. Do not edit it. + */ +final class $Type$FallibleState implements AggregatorState { + private $type$ value; + private boolean seen; + private boolean failed; + + $Type$FallibleState($type$ init) { + this.value = init; + } + + $type$ $type$Value() { + return value; + } + + void $type$Value($type$ value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + boolean failed() { + return failed; + } + + void failed(boolean failed) { + this.failed = failed; + } + + /** Extracts an intermediate view of the contents of this state. 
*/ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 3; + blocks[offset + 0] = driverContext.blockFactory().newConstant$Type$BlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + blocks[offset + 2] = driverContext.blockFactory().newConstantBooleanBlockWith(failed, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st index 2d2d706c9454f..7e0949c86faaa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st @@ -18,14 +18,6 @@ final class $Type$State implements AggregatorState { private $type$ value; private boolean seen; - $Type$State() { -$if(boolean)$ - this(false); -$else$ - this(0); -$endif$ - } - $Type$State($type$ init) { this.value = init; } From 5d5d2e8a6362be73f8ce5e1ec87e1652c7eb5b47 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 21 Aug 2024 11:39:54 +0200 Subject: [PATCH 117/389] [ESQL] Fix cases of geometry collections with one point in CartesianPoint queries (#111193) --- docs/changelog/111193.yaml | 6 ++++++ muted-tests.yml | 3 --- .../xpack/esql/querydsl/query/SpatialRelatesQuery.java | 9 ++++++--- 3 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/111193.yaml diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml new file mode 100644 index 0000000000000..9e56facb60d3a --- /dev/null +++ b/docs/changelog/111193.yaml @@ -0,0 +1,6 @@ +pr: 111193 +summary: Fix cases of collections with one point +area: Geo +type: bug +issues: + - 110982 diff --git a/muted-tests.yml b/muted-tests.yml index 5a92196e4e51b..145602ad7e1aa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -74,9 +74,6 @@ tests: - class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownGeoPointIT method: testPushedDownQueriesSingleValue issue: https://github.com/elastic/elasticsearch/issues/111084 -- class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownCartesianPointIT - method: testPushedDownQueriesSingleValue - issue: https://github.com/elastic/elasticsearch/issues/110982 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 - class: org.elasticsearch.cluster.PrevalidateShardPathIT diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java index 7a47b1d38f053..d1e4e12f73868 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.index.mapper.MappedFieldType; @@ -228,10 +227,10 @@ private static org.apache.lucene.search.Query 
pointShapeQuery( if (geometry == null || geometry.isEmpty()) { throw new QueryShardException(context, "Invalid/empty geometry"); } - if (geometry.type() != ShapeType.POINT && relation == ShapeField.QueryRelation.CONTAINS) { + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + if (isPointGeometry(luceneGeometries) == false && relation == ShapeField.QueryRelation.CONTAINS) { return new MatchNoDocsQuery("A point field can never contain a non-point geometry"); } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); org.apache.lucene.search.Query intersects = XYPointField.newGeometryQuery(fieldName, luceneGeometries); if (relation == ShapeField.QueryRelation.DISJOINT) { // XYPointField does not support DISJOINT queries, so we build one as EXISTS && !INTERSECTS @@ -250,6 +249,10 @@ private static org.apache.lucene.search.Query pointShapeQuery( return intersects; } + private static boolean isPointGeometry(XYGeometry[] geometries) { + return geometries.length == 1 && geometries[0] instanceof org.apache.lucene.geo.XYPoint; + } + /** * This code is based on the ShapeQueryProcessor.shapeQuery() method */ From f5de9c00c8cba4a5224af30761ebd1590173aa5e Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Wed, 21 Aug 2024 03:57:09 -0600 Subject: [PATCH 118/389] (Doc+) "min_primary_shard_size" for 10-50GB shards (#111574) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋🏽 howdy, team! Expands [10-50GB sharding recommendation](https://www.elastic.co/guide/en/elasticsearch/reference/master/size-your-shards.html#shard-size-recommendation) to include ILM's more recent [`min_primary_shard_size`](https://www.elastic.co/guide/en/elasticsearch/reference/master/ilm-rollover.html) option to avoid small shards. --- docs/reference/how-to/size-your-shards.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 36aba99adb8c8..5f67014d5bb4a 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -162,7 +162,8 @@ and smaller shards may be appropriate for {enterprise-search-ref}/index.html[Enterprise Search] and similar use cases. If you use {ilm-init}, set the <>'s -`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB. +`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB +and `min_primary_shard_size` threshold to `10gb` to avoid shards smaller than 10GB. To see the current size of your shards, use the <>. 
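As a rough illustration of the 10GB-50GB guidance above (a sketch, not part of the patch; the ~30GB per-shard target is an assumption made for this example), picking a primary shard count for an expected index size might look like:

[source,java]
----
// Pick a primary shard count that keeps shards inside the recommended
// 10GB-50GB band, aiming near the middle so shards have room to grow.
public class ShardSizingSketch {
    private static final long GB = 1024L * 1024 * 1024;

    static int primaryShardCount(long expectedIndexBytes) {
        int shards = Math.max(1, (int) (expectedIndexBytes / (30 * GB)));
        assert expectedIndexBytes / shards <= 50 * GB : "shards would exceed the 50GB ceiling";
        return shards;
    }

    public static void main(String[] args) {
        System.out.println(primaryShardCount(300 * GB)); // 300GB index -> 10 shards of ~30GB
    }
}
----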
From 2b7a3cd06287a9e1387bf21582b690847330e07e Mon Sep 17 00:00:00 2001 From: Siddharth Rayabharam Date: Wed, 21 Aug 2024 08:37:22 -0400 Subject: [PATCH 119/389] Unmuted ModelRegistry testGetModel (#112042) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 145602ad7e1aa..f72cca070a706 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -110,9 +110,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=rollup/security_tests/Index-based access} issue: https://github.com/elastic/elasticsearch/issues/111631 -- class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT - method: testGetModel - issue: https://github.com/elastic/elasticsearch/issues/111570 - class: org.elasticsearch.tdigest.ComparisonTests method: testSparseGaussianDistribution issue: https://github.com/elastic/elasticsearch/issues/111721 From fa7f8369165a6d08592f5b835c61b7cd7b041ddd Mon Sep 17 00:00:00 2001 From: Kuni Sen <30574753+kunisen@users.noreply.github.com> Date: Wed, 21 Aug 2024 21:48:06 +0900 Subject: [PATCH 120/389] Update searchable snapshot doc about the timing to notice data loss (#112050) - Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)? => yes - Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/main/CONTRIBUTING.md)? => yes - If submitting code, have you built your formula locally prior to submission with `gradle check`? => not code - If submitting code, is your pull request against main? Unless there is a good reason otherwise, we prefer pull requests against main and will backport as needed. => not code - If submitting code, have you checked that your submission is for an [OS and architecture that we support](https://www.elastic.co/support/matrix#show_os)? => not code - If you are submitting this code for a class then read our [policy](https://github.com/elastic/elasticsearch/blob/main/CONTRIBUTING.md#contributing-as-part-of-a-class) for that. => not code ## Description Update searchable snapshot doc about the timing to notice data loss: Sometimes searchable snapshot data is cached onto disk so user may notice their data loss later during node restart (or on Elastic cloud - host maintenance) after they delete their snapshots. --- docs/reference/searchable-snapshots/index.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index 8e4a1b93b9c05..a8a9ef36dc9a6 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -336,6 +336,11 @@ cluster has write access then you must make sure that the other cluster does not delete these snapshots. The snapshot contains the sole full copy of your data. If you delete it then the data cannot be recovered from elsewhere. +* The data in a searchable snapshot index are cached in local storage, so if you +delete the underlying searchable snapshot {es} will continue to operate normally +until the first cache miss. This may be much later, for instance when a shard +relocates to a different node, or when the node holding the shard restarts. + * If the repository fails or corrupts the contents of the snapshot and you cannot restore it to its previous healthy state then the data is permanently lost. 
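A minimal read-through cache sketch makes the timing described above concrete (hypothetical types; this is not the real shared blob cache API): reads keep succeeding from the local cache after the snapshot is deleted, and the loss only surfaces when the first miss has to fetch from the repository.

[source,java]
----
import java.util.HashMap;
import java.util.Map;

class ReadThroughCacheSketch {
    private final Map<String, byte[]> localCache = new HashMap<>();
    private final Map<String, byte[]> repository; // stands in for the snapshot repository

    ReadThroughCacheSketch(Map<String, byte[]> repository) {
        this.repository = repository;
    }

    byte[] read(String region) {
        byte[] cached = localCache.get(region);
        if (cached != null) {
            return cached; // hits keep succeeding even after the snapshot is deleted
        }
        byte[] fetched = repository.get(region); // the first miss reaches the repository...
        if (fetched == null) {
            // ...and only here does the deleted snapshot become visible
            throw new IllegalStateException("snapshot data no longer available for " + region);
        }
        localCache.put(region, fetched);
        return fetched;
    }
}
----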
From 84ddd6c7af4143861b2765bd5ff1f2a66bd41fae Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:11:19 +0100 Subject: [PATCH 121/389] [DOCS] Update `rank_constant` value in retriever example (#112056) --- docs/reference/search/retriever.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index bf97da15a1ccf..b52b296220029 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -250,7 +250,7 @@ GET /restaurants/_search } } ], - "rank_constant": 0.3, <5> + "rank_constant": 1, <5> "rank_window_size": 50 <6> } } From ba87a4883324a8354ff4457c5835f602025b10fe Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 21 Aug 2024 07:12:02 -0700 Subject: [PATCH 122/389] Handle BigInteger in xcontent copy (#111937) When xcontent is copied, the parse tree is walked and each element is passed to the given generator. In the case of numbers, BigInteger is currently not handled. Although arbitrary precision BigIntegers are not supported in Elasticsearch, they appear in xcontent when using unsigned long fields. This commit adds handling for that case, and also ensures all token types are handled. Note that BigDecimal are not supported at all since double is the largest floating point mapper supported. closes #111812 --- docs/changelog/111937.yaml | 6 +++ .../xcontent/XContentGenerator.java | 9 ++++ .../xcontent/XContentGeneratorTests.java | 47 +++++++++++++++++++ 3 files changed, 62 insertions(+) create mode 100644 docs/changelog/111937.yaml create mode 100644 libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml new file mode 100644 index 0000000000000..7d856e29d54c5 --- /dev/null +++ b/docs/changelog/111937.yaml @@ -0,0 +1,6 @@ +pr: 111937 +summary: Handle `BigInteger` in xcontent copy +area: Infra/Core +type: bug +issues: + - 111812 diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 5037ed0b40664..add5a913faf8a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -148,6 +148,12 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { case LONG -> writeNumber(parser.longValue()); case FLOAT -> writeNumber(parser.floatValue()); case DOUBLE -> writeNumber(parser.doubleValue()); + case BIG_INTEGER -> writeNumber((BigInteger) parser.numberValue()); + // note: BIG_DECIMAL is not supported, ES only supports up to double. 
+ // BIG_INTEGER above is only for representing unsigned long + default -> { + assert false : "missing xcontent number handling for type [" + parser.numberType() + "]"; + } } break; case VALUE_BOOLEAN: @@ -158,6 +164,9 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { break; case VALUE_EMBEDDED_OBJECT: writeBinary(parser.binaryValue()); + break; + default: + assert false : "missing xcontent token handling for token [" + parser.text() + "]"; } } diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java new file mode 100644 index 0000000000000..ab141f9af484c --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.xcontent; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; + +public class XContentGeneratorTests extends ESTestCase { + + public void testCopyCurrentEventRoundtrip() throws Exception { + assertTypeCopy("null", "null"); + assertTypeCopy("string", "\"hi\""); + assertTypeCopy("integer", "1"); + assertTypeCopy("float", "1.0"); + assertTypeCopy("long", "5000000000"); + assertTypeCopy("double", "1.123456789"); + assertTypeCopy("biginteger", "18446744073709551615"); + } + + private void assertTypeCopy(String typename, String value) throws Exception { + var input = String.format(Locale.ROOT, "{\"%s\":%s,\"%s_in_array\":[%s]}", typename, value, typename, value); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + try ( + var generator = JsonXContent.jsonXContent.createGenerator(outputStream); + var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) + ) { + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + generator.copyCurrentEvent(parser); + } + generator.copyCurrentEvent(parser); // copy end object too + } + assertThat(outputStream.toString(StandardCharsets.UTF_8), equalTo(input)); + } +} From e9b2e57329b3eed8d5693e39464cd28456f89a5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Wed, 21 Aug 2024 16:48:51 +0200 Subject: [PATCH 123/389] Unskip mv_percentile tests and add extra debugging data (#112061) Closes https://github.com/elastic/elasticsearch/issues/112036 Closes https://github.com/elastic/elasticsearch/issues/112037 This isn't fixing anything, as I couldn't reproduce the issue. But at least now, if it fails again, we know which data rendered that result exactly (Or so I wish...) 
---
 muted-tests.yml                               |  6 ------
 .../src/main/resources/mv_percentile.csv-spec | 10 +++++-----
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index f72cca070a706..f480938c24a13 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -170,12 +170,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT
   method: testScaledFloat
   issue: https://github.com/elastic/elasticsearch/issues/112003
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {mv_percentile.FromIndex SYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/112036
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {mv_percentile.FromIndex ASYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/112037
 - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT
   method: testForceSleepsProfile {SYNC}
   issue: https://github.com/elastic/elasticsearch/issues/112039

diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec
index e22b40c7ecad8..c51e62e865ea2 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_percentile.csv-spec
@@ -95,15 +95,15 @@ FROM employees
     integer = MV_PERCENTILE(salary_change.int, 75),
     long = MV_PERCENTILE(salary_change.long, 75),
     double = MV_PERCENTILE(salary_change, 75)
-| KEEP integer, long, double
+| KEEP emp_no, integer, long, double
 | SORT double
 | LIMIT 3
 ;
 
-integer:integer | long:long | double:double
--8 | -8 | -8.46
--7 | -7 | -7.08
--6 | -6 | -6.9
+emp_no:integer | integer:integer | long:long | double:double
+10034 | -8 | -8 | -8.46
+10037 | -7 | -7 | -7.08
+10039 | -6 | -6 | -6.9
 ;
 
 fromIndexPercentile

From fe75aa38c9110b3254fcbdfc8ee535cc42f87ac5 Mon Sep 17 00:00:00 2001
From: Aurélien FOUCRET
Date: Wed, 21 Aug 2024 17:21:16 +0200
Subject: [PATCH 124/389] Enable LTR in serverless. (#111102)

---
 server/src/main/java/org/elasticsearch/TransportVersions.java | 2 +-
 .../core/ml/inference/trainedmodel/LearningToRankConfig.java  | 3 ++-
 .../org/elasticsearch/xpack/ml/MachineLearningExtension.java  | 2 +-
 .../xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java | 4 ++--
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index fad57b3d6c854..9fe270e933785 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -194,7 +194,7 @@ static TransportVersion def(int id) {
     public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0);
     public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0);
     public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0);
-
+    public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0);
 /*
  * STOP! READ THIS FIRST! No, really,
  * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
index 33e510fcb227c..9929e59a9c803 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -40,7 +41,7 @@ public class LearningToRankConfig extends RegressionConfig implements Rewriteable {
 
     public static final ParseField NAME = new ParseField("learning_to_rank");
-    static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.current();
+    static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.LTR_SERVERLESS_RELEASE;
     public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values");
     public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors");
     public static final ParseField DEFAULT_PARAMS = new ParseField("default_params");

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
index 0f8024dd7207a..528883439ef2f 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
@@ -26,7 +26,7 @@ default void configure(Settings settings) {}
     boolean isNlpEnabled();
 
     default boolean isLearningToRankEnabled() {
-        return false;
+        return true;
     }
 
     default boolean disableInferenceProcessCache() {

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
index d2179a69ebc24..46edcf1f63c01 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
@@ -9,6 +9,7 @@
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -303,8 +304,7 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        // TODO: update transport version when released!
-        return TransportVersion.current();
+        return TransportVersions.LTR_SERVERLESS_RELEASE;
     }
 
     @Override

From d38aeab7b364f27bc02b701e1c5ca8f9c970e0a9 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Wed, 21 Aug 2024 11:23:47 -0400
Subject: [PATCH 125/389] ESQL: Drop cast in aggs (#112035)

Drops all `Math.toIntExact` calls in aggs that aren't needed because
groups *are* `int` already.
---
 .../compute/gen/GroupingAggregatorImplementer.java     |  7 +++----
 ...CountDistinctBooleanGroupingAggregatorFunction.java | 10 +++++-----
 ...ountDistinctBytesRefGroupingAggregatorFunction.java | 10 +++++-----
 .../CountDistinctDoubleGroupingAggregatorFunction.java | 10 +++++-----
 .../CountDistinctFloatGroupingAggregatorFunction.java  | 10 +++++-----
 .../CountDistinctIntGroupingAggregatorFunction.java    | 10 +++++-----
 .../CountDistinctLongGroupingAggregatorFunction.java   | 10 +++++-----
 .../MaxBooleanGroupingAggregatorFunction.java          | 10 +++++-----
 .../MaxBytesRefGroupingAggregatorFunction.java         | 10 +++++-----
 .../MaxDoubleGroupingAggregatorFunction.java           | 10 +++++-----
 .../MaxFloatGroupingAggregatorFunction.java            | 10 +++++-----
 .../aggregation/MaxIntGroupingAggregatorFunction.java  | 10 +++++-----
 .../aggregation/MaxIpGroupingAggregatorFunction.java   | 10 +++++-----
 .../aggregation/MaxLongGroupingAggregatorFunction.java | 10 +++++-----
 ...oluteDeviationDoubleGroupingAggregatorFunction.java | 10 +++++-----
 ...soluteDeviationFloatGroupingAggregatorFunction.java | 10 +++++-----
 ...AbsoluteDeviationIntGroupingAggregatorFunction.java | 10 +++++-----
 ...bsoluteDeviationLongGroupingAggregatorFunction.java | 10 +++++-----
 .../MinBooleanGroupingAggregatorFunction.java          | 10 +++++-----
 .../MinBytesRefGroupingAggregatorFunction.java         | 10 +++++-----
 .../MinDoubleGroupingAggregatorFunction.java           | 10 +++++-----
 .../MinFloatGroupingAggregatorFunction.java            | 10 +++++-----
 .../aggregation/MinIntGroupingAggregatorFunction.java  | 10 +++++-----
 .../aggregation/MinIpGroupingAggregatorFunction.java   | 10 +++++-----
 .../aggregation/MinLongGroupingAggregatorFunction.java | 10 +++++-----
 .../PercentileDoubleGroupingAggregatorFunction.java    | 10 +++++-----
 .../PercentileFloatGroupingAggregatorFunction.java     | 10 +++++-----
 .../PercentileIntGroupingAggregatorFunction.java       | 10 +++++-----
 .../PercentileLongGroupingAggregatorFunction.java      | 10 +++++-----
 .../RateDoubleGroupingAggregatorFunction.java          | 10 +++++-----
 .../RateFloatGroupingAggregatorFunction.java           | 10 +++++-----
 .../aggregation/RateIntGroupingAggregatorFunction.java | 10 +++++-----
 .../RateLongGroupingAggregatorFunction.java            | 10 +++++-----
 .../SumDoubleGroupingAggregatorFunction.java           | 10 +++++-----
 .../SumFloatGroupingAggregatorFunction.java            | 10 +++++-----
 .../aggregation/SumIntGroupingAggregatorFunction.java  | 10 +++++-----
 .../aggregation/SumLongGroupingAggregatorFunction.java | 10 +++++-----
 .../TopBooleanGroupingAggregatorFunction.java          | 10 +++++-----
 .../TopDoubleGroupingAggregatorFunction.java           | 10 +++++-----
 .../TopFloatGroupingAggregatorFunction.java            | 10 +++++-----
 .../aggregation/TopIntGroupingAggregatorFunction.java  | 10 +++++-----
 .../aggregation/TopIpGroupingAggregatorFunction.java   | 10 +++++-----
 .../aggregation/TopLongGroupingAggregatorFunction.java | 10 +++++-----
 .../ValuesBooleanGroupingAggregatorFunction.java       | 10 +++++-----
 .../ValuesBytesRefGroupingAggregatorFunction.java      | 10 +++++-----
 .../ValuesDoubleGroupingAggregatorFunction.java        | 10 +++++-----
 .../ValuesFloatGroupingAggregatorFunction.java         | 10 +++++-----
 .../ValuesIntGroupingAggregatorFunction.java           | 10
+++++----- .../ValuesLongGroupingAggregatorFunction.java | 10 +++++----- ...tesianPointDocValuesGroupingAggregatorFunction.java | 10 +++++----- ...ianPointSourceValuesGroupingAggregatorFunction.java | 10 +++++----- ...oidGeoPointDocValuesGroupingAggregatorFunction.java | 10 +++++----- ...GeoPointSourceValuesGroupingAggregatorFunction.java | 10 +++++----- 53 files changed, 263 insertions(+), 264 deletions(-) diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 0c4aeca996a19..3dffbcf84eb78 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -364,16 +364,15 @@ private MethodSpec addRawInputLoop(TypeName groupsType, TypeName valuesType) { builder.beginControlFlow("for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++)"); { if (groupsIsBlock) { - // TODO we can drop this once we stop sending null group keys builder.beginControlFlow("if (groups.isNull(groupPosition))"); builder.addStatement("continue"); builder.endControlFlow(); builder.addStatement("int groupStart = groups.getFirstValueIndex(groupPosition)"); builder.addStatement("int groupEnd = groupStart + groups.getValueCount(groupPosition)"); builder.beginControlFlow("for (int g = groupStart; g < groupEnd; g++)"); - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(g))"); + builder.addStatement("int groupId = groups.getInt(g)"); } else { - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); + builder.addStatement("int groupId = groups.getInt(groupPosition)"); } if (warnExceptions.isEmpty() == false) { @@ -516,7 +515,7 @@ private MethodSpec addIntermediateInput() { } builder.beginControlFlow("for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++)"); { - builder.addStatement("int groupId = Math.toIntExact(groups.getInt(groupPosition))"); + builder.addStatement("int groupId = groups.getInt(groupPosition)"); if (hasPrimitiveState()) { if (warnExceptions.isEmpty()) { assert intermediateState.size() == 2; diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java index a12677e70e8a9..98e57b71db416 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int 
positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector tbit = ((BooleanBlock) tbitUncast).asVector(); assert fbit.getPositionCount() == tbit.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBooleanAggregator.combineIntermediate(state, groupId, fbit.getBoolean(groupPosition + positionOffset), tbit.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java index 4879df5cf1c2c..35fd83598b9d6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, 
BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctBytesRefAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java index 1e0ce58377f9e..894b81b311363 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < 
groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctDoubleAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java index 60c1755b88c6a..5f6b4211e6c5e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); 
CountDistinctFloatAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java index 99e6ace52b256..83300393e560d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java @@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctIntAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java index 85f823296c886..44e9fefb3161c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java @@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); CountDistinctLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); CountDistinctLongAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java index f404fccd45d51..084e346a7b093 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu 
private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), max.getBoolean(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java index 1720a8863a613..a50cf8593a6e1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void 
addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MaxBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page assert max.getPositionCount() == seen.getPositionCount(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxBytesRefAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java index da93320eaf96e..b874bc43dc238 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + 
groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxDoubleAggregator.combine(state.getOrDefault(groupId), max.getDouble(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java index 85708792732a7..f3ebd468ebc72 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition 
= 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), max.getFloat(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java index c8b1b6910c0aa..8b364e7a02e96 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java @@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset))); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxIntAggregator.combine(state.getOrDefault(groupId), max.getInt(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java index c556b23215e6b..a722d95f3b108 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java @@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MaxIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page assert max.getPositionCount() == seen.getPositionCount(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MaxIpAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java index 41d893f9bbf0c..fee2f5a9c2e7c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, 
LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset))); } } @@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); assert max.getPositionCount() == seen.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (seen.getBoolean(groupPosition + positionOffset)) { state.set(groupId, MaxLongAggregator.combine(state.getOrDefault(groupId), max.getLong(groupPosition + positionOffset))); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java index e08488685d2cb..836248428f231 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector 
values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); MedianAbsoluteDeviationDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationDoubleAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java index 84646476fcee0..7a67f0d3449f0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java @@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + 
groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
       }
     }
@@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MedianAbsoluteDeviationFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java
index 02866ee15b961..315034a28ff8f 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java
@@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, IntVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MedianAbsoluteDeviationIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset));
     }
   }
@@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) {
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         MedianAbsoluteDeviationIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset));
       }
     }
@@ -155,7 +155,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MedianAbsoluteDeviationIntAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java
index 36c40e10e54d5..af0374012be52 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, LongBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, LongVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MedianAbsoluteDeviationLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         MedianAbsoluteDeviationLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset));
       }
     }
@@ -157,7 +157,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MedianAbsoluteDeviationLongAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java
index 6175cad3924e2..45e677ee25b56 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java
@@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu
 
   private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset)));
     }
   }
@@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value
      int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset)));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert min.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), min.getBoolean(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java
index eb309614fcf3c..e092dd93210f6 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java
@@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) {
   private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) {
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val
   private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) {
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
@@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         MinBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch));
       }
     }
@@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     assert min.getPositionCount() == seen.getPositionCount();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MinBytesRefAggregator.combineIntermediate(state, groupId, min.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java
index 7d0374b3d21f7..970a8a7597514 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset)));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), values.getDouble(groupPosition + positionOffset)));
       }
     }
@@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert min.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, MinDoubleAggregator.combine(state.getOrDefault(groupId), min.getDouble(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java
index 2f00bbf1335ed..4e8b4cc9417c8 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values
 
   private void addRawInput(int positionOffset, IntVector groups, FloatVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset)));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset)));
       }
     }
@@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert min.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), min.getFloat(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java
index 6625fd327237b..6e976a582a892 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java
@@ -90,7 +90,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -104,7 +104,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, IntVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset)));
     }
   }
@@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) {
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -138,7 +138,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset)));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert min.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, MinIntAggregator.combine(state.getOrDefault(groupId), min.getInt(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java
index 5b51f041bd966..146515d363af7 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java
@@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) {
   private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) {
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val
   private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) {
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MinIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
@@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val
      int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         MinIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch));
       }
     }
@@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     assert max.getPositionCount() == seen.getPositionCount();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       MinIpAggregator.combineIntermediate(state, groupId, max.getBytesRef(groupPosition + positionOffset, scratch), seen.getBoolean(groupPosition + positionOffset));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java
index f0c3727d7db0b..a3db9a2704660 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, LongBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, LongVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset)));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset)));
       }
     }
@@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert min.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, MinLongAggregator.combine(state.getOrDefault(groupId), min.getLong(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java
index 9d486b9614dab..871e93a72d900 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java
@@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
     }
   }
@@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         PercentileDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileDoubleAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java
index 564e0e90018c2..8b0f28b2632d1 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java
@@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values
 
   private void addRawInput(int positionOffset, IntVector groups, FloatVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
     }
   }
@@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java
index 8c2bd7091143f..fc1031dcbe0d0 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java
@@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, IntVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset));
     }
   }
@@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) {
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         PercentileIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset));
       }
     }
@@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileIntAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java
index c1c332ba0094d..1b14f02356b8f 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java
@@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, LongBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, LongVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset));
     }
   }
@@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         PercentileLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector();
     BytesRef scratch = new BytesRef();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       PercentileLongAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java
index 8d9e011891e95..c85cf78a39c45 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java
@@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleVector values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       var valuePosition = groupPosition + positionOffset;
       RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition));
     }
@@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         var valuePosition = groupPosition + positionOffset;
         RateDoubleAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getDouble(valuePosition));
       }
@@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     DoubleVector resets = ((DoubleBlock) resetsUncast).asVector();
     assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java
index 40f53741bf3da..a5d2131a2445a 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java
@@ -105,7 +105,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, FloatBlock values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values
 
   private void addRawInput(int positionOffset, IntVector groups, FloatVector values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       var valuePosition = groupPosition + positionOffset;
       RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition));
     }
@@ -135,7 +135,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values,
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -157,7 +157,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         var valuePosition = groupPosition + positionOffset;
         RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition));
       }
@@ -185,7 +185,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     DoubleVector resets = ((DoubleBlock) resetsUncast).asVector();
     assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java
index 6bd4b833dc9e6..0fb0b05c11164 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java
@@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values, LongVector timestamps) {
    for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values,
 
   private void addRawInput(int positionOffset, IntVector groups, IntVector values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       var valuePosition = groupPosition + positionOffset;
       RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition));
     }
@@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values,
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values,
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         var valuePosition = groupPosition + positionOffset;
         RateIntAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getInt(valuePosition));
       }
@@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     DoubleVector resets = ((DoubleBlock) resetsUncast).asVector();
     assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       RateIntAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java
index 27318d6496737..82297b618b03e 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java
@@ -103,7 +103,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, LongBlock values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -118,7 +118,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values,
 
   private void addRawInput(int positionOffset, IntVector groups, LongVector values, LongVector timestamps) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       var valuePosition = groupPosition + positionOffset;
       RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition));
     }
@@ -133,7 +133,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values,
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -155,7 +155,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values,
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         var valuePosition = groupPosition + positionOffset;
         RateLongAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getLong(valuePosition));
       }
@@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     DoubleVector resets = ((DoubleBlock) resetsUncast).asVector();
     assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       RateLongAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java
index 5085cfc3bebcf..4f0bcae66ee4a 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java
@@ -93,7 +93,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -107,7 +107,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       SumDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
     }
   }
@@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values
      int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -141,7 +141,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         SumDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
       }
     }
@@ -168,7 +168,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       SumDoubleAggregator.combineIntermediate(state, groupId, value.getDouble(groupPosition + positionOffset), delta.getDouble(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java
index c69ce16f0bccb..2f4165dfeadfa 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java
@@ -95,7 +95,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values
 
   private void addRawInput(int positionOffset, IntVector groups, FloatVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
     }
   }
@@ -122,7 +122,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
       }
     }
@@ -170,7 +170,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       SumFloatAggregator.combineIntermediate(state, groupId, value.getDouble(groupPosition + positionOffset), delta.getDouble(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset));
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java
index 6891fe548908f..95d380c455bf4 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, IntVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset)));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) {
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), values.getInt(groupPosition + positionOffset)));
       }
     }
@@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert sum.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, SumIntAggregator.combine(state.getOrDefault(groupId), sum.getLong(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java
index 507aa343aa74e..324d8f53e65cb 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java
@@ -92,7 +92,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, LongBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values)
 
   private void addRawInput(int positionOffset, IntVector groups, LongVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       state.set(groupId, SumLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset)));
     }
   }
@@ -119,7 +119,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -140,7 +140,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values)
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         state.set(groupId, SumLongAggregator.combine(state.getOrDefault(groupId), values.getLong(groupPosition + positionOffset)));
       }
     }
@@ -162,7 +162,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
     assert sum.getPositionCount() == seen.getPositionCount();
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (seen.getBoolean(groupPosition + positionOffset)) {
         state.set(groupId, SumLongAggregator.combine(state.getOrDefault(groupId), sum.getLong(groupPosition + positionOffset)));
       }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java
index 53b5149e4da7e..d169c456329b7 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java
@@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu
 
   private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset));
     }
   }
@@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         TopBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     }
     BooleanBlock top = (BooleanBlock) topUncast;
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopBooleanAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java
index c54dce5715846..07da387f88ce6 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java
@@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value
 
   private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
     }
   }
@@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         TopDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     }
     DoubleBlock top = (DoubleBlock) topUncast;
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopDoubleAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java
index 4c00f4d2c237d..369fa7401e508 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java
@@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values
 
   private void addRawInput(int positionOffset, IntVector groups, FloatVector values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
     }
   }
@@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values)
      int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         if (values.isNull(groupPosition + positionOffset)) {
           continue;
         }
@@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values
       int groupStart = groups.getFirstValueIndex(groupPosition);
       int groupEnd = groupStart + groups.getValueCount(groupPosition);
       for (int g = groupStart; g < groupEnd; g++) {
-        int groupId = Math.toIntExact(groups.getInt(g));
+        int groupId = groups.getInt(g);
         TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset));
       }
     }
@@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page
     }
     FloatBlock top = (FloatBlock) topUncast;
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       TopFloatAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset);
     }
   }
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java
index 37384238b7297..04b53fe6aab69 100644
--- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java
@@ -94,7 +94,7 @@ public void add(int positionOffset, IntVector groupIds) {
 
   private void addRawInput(int positionOffset, IntVector groups, IntBlock values) {
     for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) {
-      int groupId = Math.toIntExact(groups.getInt(groupPosition));
+      int groupId = groups.getInt(groupPosition);
       if (values.isNull(groupPosition + positionOffset)) {
         continue;
       }
@@ -108,7 +108,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values)
private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -121,7 +121,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -142,7 +142,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -158,7 +158,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } IntBlock top = (IntBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIntAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java index d9e480c324676..272b4827b5817 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -113,7 +113,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -127,7 +127,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int 
groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -149,7 +149,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopIpAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -166,7 +166,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefBlock top = (BytesRefBlock) topUncast; BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopIpAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java index 7b199b2a81389..9d1ed395c5964 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java @@ -96,7 +96,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -110,7 +110,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -123,7 +123,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -144,7 +144,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -160,7 +160,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } LongBlock top = (LongBlock) topUncast; for (int groupPosition = 0; groupPosition < 
groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); TopLongAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java index 16e92a7c69ca8..062a49dbf4f7c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, BooleanBlock valu private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesBooleanAggregator.combine(state, groupId, values.getBoolean(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } BooleanBlock values = (BooleanBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBooleanAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java index f9a51fcc52221..0a929913e9fde 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java @@ -91,7 +91,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -106,7 +106,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -120,7 +120,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -142,7 +142,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -159,7 +159,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page BytesRefBlock values = (BytesRefBlock) valuesUncast; BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesBytesRefAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java index 11a0eb96c6a8e..b8ca2d2b9665b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private 
void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } DoubleBlock values = (DoubleBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesDoubleAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java index 54cc06072cd24..0c4e9c32328c7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - 
int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } FloatBlock values = (FloatBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesFloatAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java index 67722cd1318c0..95e527c018cd1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java @@ -87,7 +87,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -101,7 +101,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -114,7 +114,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -135,7 +135,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -151,7 +151,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } IntBlock values = (IntBlock) valuesUncast; for (int 
groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesIntAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java index 06508ce360ba4..a7963447037a8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java @@ -89,7 +89,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -116,7 +116,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -137,7 +137,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); ValuesLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -153,7 +153,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page } LongBlock values = (LongBlock) valuesUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); ValuesLongAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java index 795207b245023..dc3c1cf2917ec 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -112,7 +112,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -125,7 +125,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -146,7 +146,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidCartesianPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java index 12c0f24ef43e3..0d1378ce988f3 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java @@ -102,7 +102,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -131,7 +131,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -153,7 +153,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidCartesianPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -190,7 +190,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidCartesianPointSourceValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java index 
2447939d56db9..f5604e9e23200 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java @@ -98,7 +98,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -112,7 +112,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -125,7 +125,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -146,7 +146,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidGeoPointDocValuesAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -183,7 +183,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointDocValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java index 075f8749503b8..b3caeef925a73 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java @@ -102,7 +102,7 @@ public void add(int positionOffset, IntVector groupIds) { private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -117,7 +117,7 @@ private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock val private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -131,7 +131,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefBlock valu int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); if (values.isNull(groupPosition + positionOffset)) { continue; } @@ -153,7 +153,7 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val int groupStart = groups.getFirstValueIndex(groupPosition); int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { - int groupId = Math.toIntExact(groups.getInt(g)); + int groupId = groups.getInt(g); SpatialCentroidGeoPointSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -190,7 +190,7 @@ public void addIntermediateInput(int positionOffset, IntVector groups, Page page LongVector count = ((LongBlock) countUncast).asVector(); assert xVal.getPositionCount() == xDel.getPositionCount() && xVal.getPositionCount() == yVal.getPositionCount() && xVal.getPositionCount() == yDel.getPositionCount() && xVal.getPositionCount() == count.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = Math.toIntExact(groups.getInt(groupPosition)); + int groupId = groups.getInt(groupPosition); SpatialCentroidGeoPointSourceValuesAggregator.combineIntermediate(state, groupId, xVal.getDouble(groupPosition + positionOffset), xDel.getDouble(groupPosition + positionOffset), yVal.getDouble(groupPosition + positionOffset), yDel.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); } } From 27721c3c053ebec2c4dbfd91222d03a839d67348 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 21 Aug 2024 08:36:55 -0700 Subject: [PATCH 126/389] Add a test reproducing issue with lookup of parent document in nested field synthetic source (#112043) --- .../indices.create/20_synthetic_source.yml | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 22deb7012c4ed..e51074ee55270 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1204,3 +1204,49 @@ nested object with stored array: - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } + +--- +empty nested object sorted as a first document: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + sort.field: "name" + sort.order: "asc" + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested: + type: nested + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "B", "nested": { "a": "b" } }' + - '{ "create": { } }' + - '{ "name": "A" }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested.a: "b" } + From d76e1af0a5bb00ac69abc8e608ba8c3f6b75e54f Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 21 Aug 2024 08:46:40 -0700 Subject: [PATCH 127/389] Fix calculation of parent offset for ignored source in some cases (#112046) --- docs/changelog/112046.yaml | 5 +++ .../index/mapper/DocumentParser.java | 4 +- .../mapper/IgnoredSourceFieldMapperTests.java | 42 +++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/112046.yaml diff --git a/docs/changelog/112046.yaml b/docs/changelog/112046.yaml new file mode 100644 index 0000000000000..f3cda1ed7a7d2 --- /dev/null +++ b/docs/changelog/112046.yaml @@ -0,0 +1,5 @@ +pr: 112046 +summary: Fix calculation of parent offset for ignored source in some cases +area: Mapping +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index aad8d5f6dfa2a..2bf3668a3dabe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -274,7 +274,7 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( context.parent().fullPath(), - context.parent().fullPath().indexOf(currentFieldName), + context.parent().fullPath().lastIndexOf(currentFieldName), XContentDataHelper.encodeToken(parser), context.doc() ) @@ -301,7 +301,7 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( context.parent().fullPath(), - context.parent().fullPath().indexOf(context.parent().leafName()), + context.parent().fullPath().lastIndexOf(context.parent().leafName()), XContentDataHelper.encodeXContentBuilder(tuple.v2()), context.doc() ) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java 
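[Editor's note, not part of the patch: the indexOf-to-lastIndexOf change above matters when a leaf field's name also occurs earlier in its full path. Taking the mapping from the tests this patch adds, an object "at" nested under "path" has the full path "path.at", and the substring "at" already appears inside "path":

    "path.at".indexOf("at")     // == 1, matches the "at" inside "path", the wrong offset
    "path.at".lastIndexOf("at") // == 5, the actual offset of the leaf name

With indexOf, the ignored-source entry could be recorded against the wrong parent offset, which is what the two new tests below reproduce.]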
b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index fc30b9b6677f1..436200d919d8f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -1055,4 +1055,46 @@ public void testRuntimeDynamicObjectNestedArray() throws IOException { assertEquals(""" {"path":[{"to":{"foo":"A","bar":"B"}},{"to":{"foo":"C","bar":"D"}}]}""", syntheticSource); } + + public void testDisabledSubObjectWithNameOverlappingParentName() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + b.startObject("properties"); + { + b.startObject("at").field("type", "object").field("enabled", "false").endObject(); + } + b.endObject(); + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("at").field("foo", "A").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"at":{"foo":"A"}}}""", syntheticSource); + } + + public void testStoredNestedSubObjectWithNameOverlappingParentName() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + b.startObject("properties"); + { + b.startObject("at").field("type", "nested").field("store_array_source", "true").endObject(); + } + b.endObject(); + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("at").field("foo", "A").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"at":{"foo":"A"}}}""", syntheticSource); + } } From bf1ec5d58f218364313be3c3f08bf2cfbc5481b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Wed, 21 Aug 2024 17:47:41 +0200 Subject: [PATCH 128/389] ESQL: Add async ID and is_running headers to ESQL async query (#111840) Add headers to async ESQL queries to show the status and query ID without having to parse the body. ESQL part of #109576 --- docs/changelog/111840.yaml | 5 +++++ .../common/util/concurrent/ThreadContext.java | 2 +- .../xpack/core/async/AsyncExecutionId.java | 3 +++ .../xpack/esql/qa/rest/RestEsqlTestCase.java | 15 ++++++++++++++- .../xpack/esql/action/EsqlCapabilities.java | 5 +++++ .../esql/plugin/TransportEsqlQueryAction.java | 13 ++++++++++--- 6 files changed, 38 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/111840.yaml diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml new file mode 100644 index 0000000000000..c40a9e2aef621 --- /dev/null +++ b/docs/changelog/111840.yaml @@ -0,0 +1,5 @@ +pr: 111840 +summary: "ESQL: Add async ID and `is_running` headers to ESQL async query" +area: ES|QL +type: feature +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 0ff0bb2657a5c..7bee1e895bceb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -579,7 +579,7 @@ public Map getTransientHeaders() { } /** - * Add the {@code value} for the specified {@code key} Any duplicate {@code value} is ignored. + * Add the {@code value} for the specified {@code key}. Any duplicate {@code value} is ignored. 
* * @param key the header name * @param value the header value diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java index b4cc8bc0d3f30..8316b4cfa605a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncExecutionId.java @@ -21,6 +21,9 @@ * A class that contains all information related to a submitted async execution. */ public final class AsyncExecutionId { + public static final String ASYNC_EXECUTION_ID_HEADER = "X-Elasticsearch-Async-Id"; + public static final String ASYNC_EXECUTION_IS_RUNNING_HEADER = "X-Elasticsearch-Async-Is-Running"; + private final String docId; private final TaskId taskId; private final String encoded; diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 5a99e8006d6a7..8b6511875e86c 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -903,17 +903,24 @@ public static Map runEsqlAsync( checkKeepOnCompletion(requestObject, json); String id = (String) json.get("id"); + var supportsAsyncHeaders = clusterHasCapability("POST", "/_query", List.of(), List.of("async_query_status_headers")).orElse(false); + if (id == null) { // no id returned from an async call, must have completed immediately and without keep_on_completion assertThat(requestObject.keepOnCompletion(), either(nullValue()).or(is(false))); assertThat((boolean) json.get("is_running"), is(false)); + if (supportsAsyncHeaders) { + assertThat(response.getHeader("X-Elasticsearch-Async-Id"), nullValue()); + assertThat(response.getHeader("X-Elasticsearch-Async-Is-Running"), is("?0")); + } assertWarnings(response, expectedWarnings, expectedWarningsRegex); json.remove("is_running"); // remove this to not mess up later map assertions return Collections.unmodifiableMap(json); } else { // async may not return results immediately, so may need an async get assertThat(id, is(not(emptyOrNullString()))); - if ((boolean) json.get("is_running") == false) { + boolean isRunning = (boolean) json.get("is_running"); + if (isRunning == false) { // must have completed immediately so keep_on_completion must be true assertThat(requestObject.keepOnCompletion(), is(true)); assertWarnings(response, expectedWarnings, expectedWarningsRegex); @@ -925,6 +932,12 @@ public static Map runEsqlAsync( assertThat(json.get("columns"), is(equalTo(List.>of()))); // no partial results assertThat(json.get("pages"), nullValue()); } + + if (supportsAsyncHeaders) { + assertThat(response.getHeader("X-Elasticsearch-Async-Id"), is(id)); + assertThat(response.getHeader("X-Elasticsearch-Async-Is-Running"), is(isRunning ? 
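[Editor's note, not part of the patch: the assertions above exercise the new behaviour: the async submit response now reports the execution id and running state as HTTP headers, so clients can track a query without parsing the body (the "?1"/"?0" values mirror HTTP structured-field booleans). A hypothetical client-side sketch using the low-level REST client; the request name is assumed, not from the patch:

    Response resp = client.performRequest(asyncEsqlSubmitRequest);          // POST /_query/async
    String asyncId = resp.getHeader("X-Elasticsearch-Async-Id");            // null if the query completed inline
    boolean running = "?1".equals(resp.getHeader("X-Elasticsearch-Async-Is-Running"));
    if (asyncId != null && running) {
        // poll the async-get endpoint (GET /_query/async/<id>) until is_running turns false
    }

]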
"?1" : "?0")); + } + // issue a second request to "async get" the results Request getRequest = prepareAsyncGetRequest(id); getRequest.setOptions(options); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 913eb382a5daf..b60701fe19365 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -236,6 +236,11 @@ public enum Cap { */ COMBINE_DISJUNCTIVE_CIDRMATCHES, + /** + * Support sending HTTP headers about the status of an async query. + */ + ASYNC_QUERY_STATUS_HEADERS, + /** * Consider the upper bound when computing the interval in BUCKET auto mode. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index cab6161cb3eea..561baa76a01a9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -55,6 +55,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction { + private final ThreadPool threadPool; private final PlanExecutor planExecutor; private final ComputeService computeService; private final ExchangeService exchangeService; @@ -82,6 +83,7 @@ public TransportEsqlQueryAction( ) { // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.threadPool = threadPool; this.planExecutor = planExecutor; this.clusterService = clusterService; this.requestExecutor = threadPool.executor(ThreadPool.Names.SEARCH); @@ -181,9 +183,11 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener columns = result.schema().stream().map(c -> new ColumnInfoImpl(c.name(), c.dataType().outputType())).toList(); EsqlQueryResponse.Profile profile = configuration.profile() ? 
new EsqlQueryResponse.Profile(result.profiles()) : null; + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, "?0"); if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { - String id = asyncTask.getExecutionId().getEncoded(); - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async()); + String asyncExecutionId = asyncTask.getExecutionId().getEncoded(); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId); + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), asyncExecutionId, false, request.async()); } return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); } @@ -231,12 +235,15 @@ public EsqlQueryTask createTask( @Override public EsqlQueryResponse initialResponse(EsqlQueryTask task) { + var asyncExecutionId = task.getExecutionId().getEncoded(); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_ID_HEADER, asyncExecutionId); + threadPool.getThreadContext().addResponseHeader(AsyncExecutionId.ASYNC_EXECUTION_IS_RUNNING_HEADER, "?1"); return new EsqlQueryResponse( List.of(), List.of(), null, false, - task.getExecutionId().getEncoded(), + asyncExecutionId, true, // is_running true // isAsync ); From a281bf8b17e0d88109c70af58e6daef8becaf06d Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 21 Aug 2024 12:59:06 -0400 Subject: [PATCH 129/389] Fix RRF validation for rank_constant < 1 (#112058) * Fix RRF validation for rank_constant < 1 * Add yaml test * Update docs/changelog/112058.yaml --- docs/changelog/112058.yaml | 5 ++ .../xpack/rank/rrf/RRFRankBuilder.java | 8 +-- .../rest-api-spec/test/rrf/100_rank_rrf.yml | 54 ++++++++++++++++--- 3 files changed, 57 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/112058.yaml diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml new file mode 100644 index 0000000000000..e974b3413582e --- /dev/null +++ b/docs/changelog/112058.yaml @@ -0,0 +1,5 @@ +pr: 112058 +summary: Fix RRF validation for `rank_constant` < 1 +area: Ranking +type: bug +issues: [] diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java index e891e575e7de3..10aff2f4d68cd 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java @@ -46,9 +46,6 @@ public class RRFRankBuilder extends RankBuilder { static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(RRFRankPlugin.NAME, args -> { int windowSize = args[0] == null ? DEFAULT_RANK_WINDOW_SIZE : (int) args[0]; int rankConstant = args[1] == null ? 
DEFAULT_RANK_CONSTANT : (int) args[1]; - if (rankConstant < 1) { - throw new IllegalArgumentException("[rank_constant] must be greater than [0] for [rrf]"); - } return new RRFRankBuilder(windowSize, rankConstant); }); @@ -73,6 +70,11 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio public RRFRankBuilder(int rankWindowSize, int rankConstant) { super(rankWindowSize); + + if (rankConstant < 1) { + throw new IllegalArgumentException("[rank_constant] must be greater or equal to [1] for [rrf]"); + } + this.rankConstant = rankConstant; } diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml index a4972d0557dab..4f76f52409810 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/100_rank_rrf.yml @@ -33,7 +33,7 @@ setup: body: text: "term term" keyword: "other" - vector: [0.0] + vector: [ 0.0 ] - do: index: @@ -42,7 +42,7 @@ setup: body: text: "other" keyword: "other" - vector: [1.0] + vector: [ 1.0 ] - do: index: @@ -51,10 +51,10 @@ setup: body: text: "term" keyword: "keyword" - vector: [2.0] + vector: [ 2.0 ] - do: - indices.refresh: {} + indices.refresh: { } --- "Simple rank with bm25 search and kNN search": @@ -67,7 +67,7 @@ setup: fields: [ "text", "keyword" ] knn: field: vector - query_vector: [0.0] + query_vector: [ 0.0 ] k: 3 num_candidates: 3 query: @@ -125,7 +125,7 @@ setup: rank_constant: 1 size: 10 - - match: { hits.total.value : 2 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._rank: 1 } @@ -173,7 +173,7 @@ setup: rank_constant: 1 size: 10 - - match: { hits.total.value : 3 } + - match: { hits.total.value: 3 } - match: { hits.hits.0._id: "3" } - match: { hits.hits.0._rank: 1 } @@ -227,3 +227,43 @@ setup: rank_window_size: 2 rank_constant: 1 size: 10 + +--- +"RRF rank should fail if rank_constant < 1": + - requires: + cluster_features: "gte_v8.16.0" + reason: 'validation fixed in 8.16.0' + + - do: + catch: "/\\[rank_constant\\] must be greater or equal to \\[1\\] for \\[rrf\\]/" + search: + index: test + body: + track_total_hits: true + fields: [ "text", "keyword" ] + knn: + field: vector + query_vector: [ 0.0 ] + k: 3 + num_candidates: 3 + sub_searches: [ + { + "query": { + "term": { + "text": "term" + } + } + }, + { + "query": { + "match": { + "keyword": "keyword" + } + } + } + ] + rank: + rrf: + rank_window_size: 10 + rank_constant: 0.3 + size: 10 From a35da247fdb4dd40fc2c5b45ceb292b2863563a2 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 21 Aug 2024 11:21:41 -0700 Subject: [PATCH 130/389] Merge multiple ignored source entries for the same field (#111994) --- docs/changelog/111994.yaml | 6 + ...ogsIndexModeRandomDataChallengeRestIT.java | 2 +- .../mapper/IgnoredSourceFieldMapper.java | 2 +- .../index/mapper/ObjectMapper.java | 85 ++++++++--- .../index/mapper/XContentDataHelper.java | 66 ++++++++ .../mapper/IgnoredSourceFieldMapperTests.java | 141 ++++++++++++++++++ .../index/mapper/XContentDataHelperTests.java | 96 ++++++++++++ .../DefaultMappingParametersHandler.java | 11 +- .../test/FieldMaskingReader.java | 14 +- 9 files changed, 390 insertions(+), 33 deletions(-) create mode 100644 docs/changelog/111994.yaml diff --git a/docs/changelog/111994.yaml b/docs/changelog/111994.yaml new file mode 100644 index 
0000000000000..ee62651c43987 --- /dev/null +++ b/docs/changelog/111994.yaml @@ -0,0 +1,6 @@ +pr: 111994 +summary: Merge multiple ignored source entries for the same field +area: Logs +type: bug +issues: + - 111694 diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 8f23f86267261..4e123c1630457 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -68,7 +68,7 @@ public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequ assert request.isNested() == false; // "enabled: false" is not compatible with subobjects: false - // "runtime: false/strict/runtime" is not compatible with subobjects: false + // "dynamic: false/strict/runtime" is not compatible with subobjects: false return new DataSourceResponse.ObjectMappingParametersGenerator(() -> { var parameters = new HashMap(); if (ESTestCase.randomBoolean()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index efbc75490550d..f94a05b2a8658 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -83,7 +83,7 @@ void write(XContentBuilder builder) throws IOException { XContentDataHelper.decodeAndWrite(builder, value()); } - private String getFieldName() { + String getFieldName() { return parentOffset() == 0 ? name() : name().substring(parentOffset()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 2c78db6bc8b0d..e504702d84c1e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -9,12 +9,12 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -32,6 +32,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.TreeMap; import java.util.stream.Stream; @@ -74,7 +75,7 @@ DynamicFieldsBuilder getDynamicFieldsBuilder() { * If no dynamic settings are explicitly configured, we default to {@link #TRUE} */ static Dynamic getRootDynamic(MappingLookup mappingLookup) { - ObjectMapper.Dynamic rootDynamic = mappingLookup.getMapping().getRoot().dynamic; + Dynamic rootDynamic = mappingLookup.getMapping().getRoot().dynamic; return rootDynamic == null ? 
Defaults.DYNAMIC : rootDynamic; } } @@ -142,13 +143,13 @@ public final void addDynamic(String name, String prefix, Mapper mapper, Document int firstDotIndex = name.indexOf('.'); String immediateChild = name.substring(0, firstDotIndex); String immediateChildFullName = prefix == null ? immediateChild : prefix + "." + immediateChild; - ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); + Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); add(parentBuilder); } } - private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentParserContext context) { + private static Builder findObjectBuilder(String fullName, DocumentParserContext context) { // does the object mapper already exist? if so, use that ObjectMapper objectMapper = context.mappingLookup().objectMappers().get(fullName); if (objectMapper != null) { @@ -215,7 +216,7 @@ public Mapper.Builder parse(String name, Map node, MappingParser throws MapperParsingException { parserContext.incrementMappingObjectDepth(); // throws MapperParsingException if depth limit is exceeded Explicit subobjects = parseSubobjects(node); - ObjectMapper.Builder builder = new Builder(name, subobjects); + Builder builder = new Builder(name, subobjects); parseObjectFields(node, parserContext, builder); parserContext.decrementMappingObjectDepth(); return builder; @@ -237,7 +238,7 @@ protected static boolean parseObjectOrDocumentTypeProperties( String fieldName, Object fieldNode, MappingParserContext parserContext, - ObjectMapper.Builder builder + Builder builder ) { if (fieldName.equals("dynamic")) { String value = fieldNode.toString(); @@ -284,11 +285,7 @@ protected static Explicit parseSubobjects(Map node) { return Defaults.SUBOBJECTS; } - protected static void parseProperties( - ObjectMapper.Builder objBuilder, - Map propsNode, - MappingParserContext parserContext - ) { + protected static void parseProperties(Builder objBuilder, Map propsNode, MappingParserContext parserContext) { Iterator> iterator = propsNode.entrySet().iterator(); while (iterator.hasNext()) { Map.Entry entry = iterator.next(); @@ -347,7 +344,7 @@ protected static void parseProperties( for (int i = fieldNameParts.length - 2; i >= 0; --i) { String intermediateObjectName = fieldNameParts[i]; validateFieldName(intermediateObjectName, parserContext.indexVersionCreated()); - ObjectMapper.Builder intermediate = new ObjectMapper.Builder(intermediateObjectName, Defaults.SUBOBJECTS); + Builder intermediate = new Builder(intermediateObjectName, Defaults.SUBOBJECTS); intermediate.add(fieldBuilder); fieldBuilder = intermediate; } @@ -417,8 +414,8 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate /** * @return a Builder that will produce an empty ObjectMapper with the same configuration as this one */ - public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { - ObjectMapper.Builder builder = new ObjectMapper.Builder(leafName(), subobjects); + public Builder newBuilder(IndexVersion indexVersionCreated) { + Builder builder = new Builder(leafName(), subobjects); builder.enabled = this.enabled; builder.dynamic = this.dynamic; return builder; @@ -507,7 +504,7 @@ protected record MergeResult( Explicit enabled, Explicit subObjects, Explicit trackArraySource, - ObjectMapper.Dynamic dynamic, + Dynamic dynamic, Map mappers ) { static MergeResult build(ObjectMapper existing, ObjectMapper 
mergeWithObject, MapperMergeContext parentMergeContext) { @@ -789,9 +786,9 @@ public Stream> storedFieldLoaders() { @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - List loaders = new ArrayList<>(); + List loaders = new ArrayList<>(); for (SourceLoader.SyntheticFieldLoader field : fields) { - SourceLoader.SyntheticFieldLoader.DocValuesLoader loader = field.docValuesLoader(leafReader, docIdsInLeaf); + DocValuesLoader loader = field.docValuesLoader(leafReader, docIdsInLeaf); if (loader != null) { loaders.add(loader); } @@ -803,7 +800,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf } private class ObjectDocValuesLoader implements DocValuesLoader { - private final List loaders; + private final List loaders; private ObjectDocValuesLoader(List loaders) { this.loaders = loaders; @@ -812,7 +809,7 @@ private ObjectDocValuesLoader(List loaders) { @Override public boolean advanceToDoc(int docId) throws IOException { boolean anyLeafHasDocValues = false; - for (SourceLoader.SyntheticFieldLoader.DocValuesLoader docValueLoader : loaders) { + for (DocValuesLoader docValueLoader : loaders) { boolean leafHasValue = docValueLoader.advanceToDoc(docId); anyLeafHasDocValues |= leafHasValue; } @@ -848,18 +845,24 @@ public void write(XContentBuilder b) throws IOException { if (ignoredValues != null && ignoredValues.isEmpty() == false) { // Use an ordered map between field names and writer functions, to order writing by field name. - Map> orderedFields = new TreeMap<>(); + Map orderedFields = new TreeMap<>(); for (IgnoredSourceFieldMapper.NameValue value : ignoredValues) { - orderedFields.put(value.name(), value::write); + var existing = orderedFields.get(value.name()); + if (existing == null) { + orderedFields.put(value.name(), new FieldWriter.IgnoredSource(value)); + } else if (existing instanceof FieldWriter.IgnoredSource isw) { + isw.mergeWith(value); + } } for (SourceLoader.SyntheticFieldLoader field : fields) { if (field.hasValue()) { // Skip if the field source is stored separately, to avoid double-printing. 
- orderedFields.putIfAbsent(field.fieldName(), field::write); + orderedFields.computeIfAbsent(field.fieldName(), k -> new FieldWriter.FieldLoader(field)); } } + for (var writer : orderedFields.values()) { - writer.accept(b); + writer.writeTo(b); } ignoredValues = null; } else { @@ -890,6 +893,42 @@ public boolean setIgnoredValues(Map values; + + IgnoredSource(IgnoredSourceFieldMapper.NameValue initialValue) { + this.fieldName = initialValue.name(); + this.leafName = initialValue.getFieldName(); + this.values = new ArrayList<>(); + this.values.add(initialValue.value()); + } + + @Override + public void writeTo(XContentBuilder builder) throws IOException { + XContentDataHelper.writeMerged(builder, leafName, values); + } + + public FieldWriter mergeWith(IgnoredSourceFieldMapper.NameValue nameValue) { + assert Objects.equals(nameValue.name(), fieldName) : "IgnoredSource is merged with wrong field data"; + + values.add(nameValue.value()); + return this; + } + } + } } protected boolean isRoot() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index d97e03d3874ee..fefafbf13017b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -26,6 +26,8 @@ import java.math.BigInteger; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; +import java.util.Optional; /** * Helper class for processing field data of any type, as provided by the {@link XContentParser}. @@ -92,6 +94,70 @@ static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { } } + /** + * Writes encoded values to provided builder. If there are multiple values they are merged into + * a single resulting array. + * @param b destination + * @param fieldName name of the field that is written + * @param encodedParts subset of field data encoded using methods of this class. Can contain arrays which will be flattened. + * @throws IOException + */ + static void writeMerged(XContentBuilder b, String fieldName, List encodedParts) throws IOException { + if (encodedParts.isEmpty()) { + return; + } + + if (encodedParts.size() == 1) { + b.field(fieldName); + XContentDataHelper.decodeAndWrite(b, encodedParts.get(0)); + return; + } + + b.startArray(fieldName); + + for (var encodedValue : encodedParts) { + Optional encodedXContentType = switch ((char) encodedValue.bytes[encodedValue.offset]) { + case CBOR_OBJECT_ENCODING, JSON_OBJECT_ENCODING, YAML_OBJECT_ENCODING, SMILE_OBJECT_ENCODING -> Optional.of( + getXContentType(encodedValue) + ); + default -> Optional.empty(); + }; + if (encodedXContentType.isEmpty()) { + // This is a plain value, we can just write it + XContentDataHelper.decodeAndWrite(b, encodedValue); + } else { + // Encoded value could be an array which needs to be flattened + // since we are already inside an array. + try ( + XContentParser parser = encodedXContentType.get() + .xContent() + .createParser( + XContentParserConfiguration.EMPTY, + encodedValue.bytes, + encodedValue.offset + 1, + encodedValue.length - 1 + ) + ) { + if (parser.currentToken() == null) { + parser.nextToken(); + } + + // It's an array, we will flatten it. 
+ if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + b.copyCurrentStructure(parser); + } + } else { + // It is a single complex structure (an object), write it as is. + b.copyCurrentStructure(parser); + } + } + } + } + + b.endArray(); + } + /** * Returns the {@link XContentType} to use for creating an XContentBuilder to decode the passed value. */ diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 436200d919d8f..dcb5cd1711c8c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.xcontent.XContentBuilder; import org.hamcrest.Matchers; @@ -19,6 +21,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { @@ -633,6 +636,132 @@ public void testArrayWithinArray() throws IOException { {"path":[{"to":[{"name":"A"},{"name":"B"}]},{"to":[{"name":"C"},{"name":"D"}]}]}""", booleanValue), syntheticSource); } + public void testDisabledObjectWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("to").field("type", "object").field("enabled", false); + { + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject(); + { + b.startObject("to").field("name", "A").endObject(); + } + b.endObject(); + b.startObject(); + { + b.startObject("to").field("name", "B").endObject(); + } + b.endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"to":[{"name":"A"},{"name":"B"}]}}""", booleanValue), syntheticSource); + } + + public void testStoredArrayWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("to").field("type", "object").field("store_array_source", true); + { + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject(); + { + b.startArray("to"); + { + b.startObject().field("name", "A").endObject(); + b.startObject().field("name", "B").endObject(); + } + b.endArray(); + } + b.endObject(); + b.startObject(); + { + b.startArray("to"); + { + b.startObject().field("name", 
"C").endObject(); + b.startObject().field("name", "D").endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"to":[{"name":"A"},{"name":"B"},{"name":"C"},{"name":"D"}]}}""", booleanValue), syntheticSource); + } + + public void testFallbackFieldWithinHigherLevelArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("name").field("type", "keyword").field("doc_values", false).endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + + boolean booleanValue = randomBoolean(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + + b.startObject().field("name", "A").endObject(); + b.startObject().field("name", "B").endObject(); + b.startObject().field("name", "C").endObject(); + b.startObject().field("name", "D").endObject(); + } + b.endArray(); + }); + assertEquals(String.format(Locale.ROOT, """ + {"path":{"name":["A","B","C","D"]}}""", booleanValue), syntheticSource); + } + public void testFieldOrdering() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("A").field("type", "integer").endObject(); @@ -1097,4 +1226,16 @@ public void testStoredNestedSubObjectWithNameOverlappingParentName() throws IOEx assertEquals(""" {"path":{"at":{"foo":"A"}}}""", syntheticSource); } + + protected void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) + throws IOException { + // We exclude ignored source field since in some cases it contains an exact copy of a part of document source. + // Sometime synthetic source is different in this case (structurally but not logically) + // and since the copy is exact, contents of ignored source are different. 
+ assertReaderEquals( + "round trip " + syntheticSource, + new FieldMaskingReader(Set.of(SourceFieldMapper.RECOVERY_SOURCE_NAME, IgnoredSourceFieldMapper.NAME), reader), + new FieldMaskingReader(Set.of(SourceFieldMapper.RECOVERY_SOURCE_NAME, IgnoredSourceFieldMapper.NAME), roundTripReader) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java index cd5b43d0af771..a4532bca67778 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/XContentDataHelperTests.java @@ -8,9 +8,11 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -21,8 +23,12 @@ import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; +import java.util.Arrays; import java.util.Base64; +import java.util.Collection; import java.util.List; +import java.util.Map; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; @@ -168,4 +174,94 @@ public void testCloneSubContextWithParser() throws IOException { assertEquals(data, dataInParser(tuple.v2())); assertTrue(tuple.v1().getClonedSource()); } + + public void testWriteMergedWithSingleValue() throws IOException { + testWriteMergedWithSingleValue(randomLong()); + testWriteMergedWithSingleValue(randomDouble()); + testWriteMergedWithSingleValue(randomBoolean()); + testWriteMergedWithSingleValue(randomAlphaOfLength(5)); + testWriteMergedWithSingleValue(null); + testWriteMergedWithSingleValue(Map.of("object_field", randomAlphaOfLength(5))); + testWriteMergedWithSingleValue(Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5)))); + } + + private void testWriteMergedWithSingleValue(Object value) throws IOException { + var map = executeWriteMergeOnRepeated(value); + assertEquals(Arrays.asList(value, value), map.get("foo")); + } + + public void testWriteMergedWithMultipleValues() throws IOException { + testWriteMergedWithMultipleValues(List.of(randomLong(), randomLong())); + testWriteMergedWithMultipleValues(List.of(randomDouble(), randomDouble())); + testWriteMergedWithMultipleValues(List.of(randomBoolean(), randomBoolean())); + testWriteMergedWithMultipleValues(List.of(randomAlphaOfLength(5), randomAlphaOfLength(5))); + testWriteMergedWithMultipleValues(Arrays.asList(null, null)); + testWriteMergedWithMultipleValues( + List.of(Map.of("object_field", randomAlphaOfLength(5)), Map.of("object_field", randomAlphaOfLength(5))) + ); + testWriteMergedWithMultipleValues( + List.of( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))) + ) + ); + } + + private void testWriteMergedWithMultipleValues(List value) throws IOException { + var map = executeWriteMergeOnRepeated(value); + var expected = Stream.of(value, value).flatMap(Collection::stream).toList(); + assertEquals(expected, map.get("foo")); + } + + public void testWriteMergedWithMixedValues() throws IOException { + 
testWriteMergedWithMixedValues(randomLong(), List.of(randomLong(), randomLong())); + testWriteMergedWithMixedValues(randomDouble(), List.of(randomDouble(), randomDouble())); + testWriteMergedWithMixedValues(randomBoolean(), List.of(randomBoolean(), randomBoolean())); + testWriteMergedWithMixedValues(randomAlphaOfLength(5), List.of(randomAlphaOfLength(5), randomAlphaOfLength(5))); + testWriteMergedWithMixedValues(null, Arrays.asList(null, null)); + testWriteMergedWithMixedValues( + Map.of("object_field", randomAlphaOfLength(5)), + List.of(Map.of("object_field", randomAlphaOfLength(5)), Map.of("object_field", randomAlphaOfLength(5))) + ); + testWriteMergedWithMixedValues( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + List.of( + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))), + Map.of("object_field", Map.of("nested_object_field", randomAlphaOfLength(5))) + ) + ); + } + + private void testWriteMergedWithMixedValues(Object value, List multipleValues) throws IOException { + var map = executeWriteMergeOnTwoEncodedValues(value, multipleValues); + var expected = Stream.concat(Stream.of(value), multipleValues.stream()).toList(); + assertEquals(expected, map.get("foo")); + } + + private Map executeWriteMergeOnRepeated(Object value) throws IOException { + return executeWriteMergeOnTwoEncodedValues(value, value); + } + + private Map executeWriteMergeOnTwoEncodedValues(Object first, Object second) throws IOException { + var xContentType = randomFrom(XContentType.values()); + + var firstEncoded = encodeSingleValue(first, xContentType); + var secondEncoded = encodeSingleValue(second, xContentType); + + var destination = XContentFactory.contentBuilder(xContentType); + destination.startObject(); + XContentDataHelper.writeMerged(destination, "foo", List.of(firstEncoded, secondEncoded)); + destination.endObject(); + + return XContentHelper.convertToMap(BytesReference.bytes(destination), false, xContentType).v2(); + } + + private BytesRef encodeSingleValue(Object value, XContentType xContentType) throws IOException { + var builder = XContentFactory.contentBuilder(xContentType); + builder.value(value); + + XContentParser parser = createParser(builder); + parser.nextToken(); + return XContentDataHelper.encodeToken(parser); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java index aeb34ad2e7049..9eea4e6ae932f 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java @@ -31,10 +31,15 @@ private Supplier> keywordMapping() { return () -> Map.of("store", ESTestCase.randomBoolean(), "index", ESTestCase.randomBoolean()); } - // TODO enable doc_values: false - // It is disabled because it hits a bug in synthetic source. 
private Supplier> numberMapping() { - return () -> Map.of("store", ESTestCase.randomBoolean(), "index", ESTestCase.randomBoolean()); + return () -> Map.of( + "store", + ESTestCase.randomBoolean(), + "index", + ESTestCase.randomBoolean(), + "doc_values", + ESTestCase.randomBoolean() + ); } private Supplier> unsignedLongMapping() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java index 0db85e4e67711..e0c2456db144e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java +++ b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java @@ -14,16 +14,20 @@ import org.apache.lucene.tests.index.FieldFilterLeafReader; import java.io.IOException; -import java.util.Collections; +import java.util.Set; public class FieldMaskingReader extends FilterDirectoryReader { - private final String field; + private final Set fields; public FieldMaskingReader(String field, DirectoryReader in) throws IOException { + this(Set.of(field), in); + } + + public FieldMaskingReader(Set fields, DirectoryReader in) throws IOException { super(in, new FilterDirectoryReader.SubReaderWrapper() { @Override public LeafReader wrap(LeafReader reader) { - return new FilterLeafReader(new FieldFilterLeafReader(reader, Collections.singleton(field), true)) { + return new FilterLeafReader(new FieldFilterLeafReader(reader, fields, true)) { // FieldFilterLeafReader does not forward cache helpers // since it considers it is illegal because of the fact @@ -43,13 +47,13 @@ public CacheHelper getCoreCacheHelper() { }; } }); - this.field = field; + this.fields = fields; } @Override protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { - return new FieldMaskingReader(field, in); + return new FieldMaskingReader(fields, in); } @Override From 6dd31071033e472c4d7094ae46db3a7f1f1ff76a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 21 Aug 2024 16:18:42 -0400 Subject: [PATCH 131/389] ESQL: Speed up attribute serialization tests (#112069) Speeds up the attribute serialization tests by building the random configuration one time, rather than over and over and over again. It's expensive to make a random configuration! 
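In sketch form, the optimization is just hoisting the expensive random fixture out of the per-round-trip reader so it is built once for the whole test class instead of once per deserialization. The snippet below is an illustration of that pattern only — ExpensiveConfig and the class name are placeholders, not the actual ESQL test classes:

import java.util.Random;
import java.util.function.Supplier;

// Illustrative sketch of the "build the expensive fixture once" pattern;
// ExpensiveConfig is a hypothetical stand-in for ESQL's Configuration.
class SharedConfigSketch {
    record ExpensiveConfig(long seed) {}

    // Before: each reader call built a fresh random config (slow).
    static Supplier<ExpensiveConfig> perCallReader() {
        return () -> new ExpensiveConfig(new Random().nextLong());
    }

    // After: one config is built up front and reused by every call.
    static final ExpensiveConfig SHARED = new ExpensiveConfig(new Random().nextLong());

    static Supplier<ExpensiveConfig> sharedReader() {
        return () -> SHARED;
    }

    public static void main(String[] args) {
        System.out.println(perCallReader().get()); // new instance every time
        System.out.println(sharedReader().get());  // always the same instance
    }
}

The actual diff below applies the same idea: the `config` field is created once and captured by the lambda returned from `instanceReader()`.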
--- .../function/AbstractAttributeTestCase.java | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index bc29e33c4a17f..c625ae5dfb61b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -20,17 +19,24 @@ import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.session.Configuration; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; import static org.hamcrest.Matchers.sameInstance; public abstract class AbstractAttributeTestCase extends AbstractWireSerializingTestCase< AbstractAttributeTestCase.ExtraAttribute> { + + /** + * We use a single random config for all serialization because it's pretty + * heavy to build, especially in {@link #testConcurrentSerialization()}. + */ + private Configuration config; + protected abstract T create(); protected abstract T mutate(T instance); @@ -56,7 +62,11 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { @Override protected final Writeable.Reader instanceReader() { - return ExtraAttribute::new; + return in -> { + PlanStreamInput pin = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), config); + pin.setTransportVersion(in.getTransportVersion()); + return new ExtraAttribute(pin); + }; } /** @@ -70,10 +80,8 @@ public static class ExtraAttribute implements Writeable { assertThat(a.source(), sameInstance(Source.EMPTY)); } - ExtraAttribute(StreamInput in) throws IOException { - PlanStreamInput ps = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), randomConfiguration()); - ps.setTransportVersion(in.getTransportVersion()); - a = ps.readNamedWriteable(Attribute.class); + ExtraAttribute(PlanStreamInput in) throws IOException { + a = in.readNamedWriteable(Attribute.class); } @Override From f0dbda75294bc7b5f157668553e1a7a5bc5294ca Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 21 Aug 2024 22:26:57 +0100 Subject: [PATCH 132/389] Expand docs on remote cluster proxying (#112025) It's not obvious from the docs that transport connections (including connections to remote clusters) use a custom binary protocol and require a _layer 4_ proxy. This commit clarifies this point. 
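To make the "layer 4" requirement concrete, here is a toy TCP relay (host name and ports are placeholders; 9300 is only the conventional transport port, and this is an illustration, not a production proxy configuration). The point is that a transport-capable proxy just copies raw bytes in both directions — it never parses the custom binary protocol, which is why an HTTP-aware (layer 7) proxy cannot fill this role:

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;

// Toy layer-4 relay: accepts TCP connections and blindly forwards bytes
// to a backend node. Host and ports here are illustrative placeholders.
class TcpRelaySketch {
    public static void main(String[] args) throws IOException {
        try (ServerSocket listener = new ServerSocket(9400)) {
            while (true) {
                Socket client = listener.accept();
                Socket backend = new Socket("remote-cluster-node.example", 9300);
                pump(client, backend); // client -> backend
                pump(backend, client); // backend -> client
            }
        }
    }

    // Copies raw bytes from one socket to the other; no protocol parsing.
    private static void pump(Socket from, Socket to) {
        new Thread(() -> {
            try {
                from.getInputStream().transferTo(to.getOutputStream());
            } catch (IOException ignored) {
            } finally {
                try { from.close(); to.close(); } catch (IOException ignored) {}
            }
        }).start();
    }
}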
--- docs/reference/modules/network.asciidoc | 4 ++- .../modules/remote-clusters.asciidoc | 31 +++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 593aa79ded4d9..8fdc9f2e4f9cb 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -5,7 +5,9 @@ Each {es} node has two different network interfaces. Clients send requests to {es}'s REST APIs using its <>, but nodes communicate with other nodes using the <>. The transport interface is also used for communication with -<>. +<>. The transport interface uses a custom +binary protocol sent over <> TCP channels. +Both interfaces can be configured to use <>. You can configure both of these interfaces at the same time using the `network.*` settings. If you have a more complicated network, you might need to diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 25217302b7631..510ceb6ddb013 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -63,11 +63,13 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is created using a name and a list of seed nodes. When -a remote cluster is registered, its cluster state is retrieved from one of the -seed nodes and up to three _gateway nodes_ are selected as part of remote -cluster requests. This mode requires that the gateway node's publish addresses -are accessible by the local cluster. +In sniff mode, a cluster is registered with a name of your choosing and a list +of addresses of _seed_ nodes. When you register a remote cluster using sniff +mode, {es} retrieves from one of the seed nodes the addresses of up to three +_gateway nodes_. Each `remote_cluster_client` node in the local {es} cluster +then opens several TCP connections to the publish addresses of the gateway +nodes. This mode therefore requires that the gateway nodes' publish addresses +are accessible to nodes in the local cluster. + Sniff mode is the default connection mode. + @@ -84,15 +86,18 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is created using a name and a single proxy address. -When you register a remote cluster, a configurable number of socket connections -are opened to the proxy address. The proxy is required to route those -connections to the remote cluster. Proxy mode does not require remote cluster -nodes to have accessible publish addresses. +In proxy mode, a cluster is registered with a name of your choosing and the +address of a TCP (layer 4) reverse proxy which you must configure to route +connections to the nodes of the remote cluster. When you register a remote +cluster using proxy mode, {es} opens several TCP connections to the proxy +address and uses these connections to communicate with the remote cluster. In +proxy mode {es} disregards the publish addresses of the remote cluster nodes +which means that the publish addresses of the remote cluster nodes need not be +accessible to the local cluster. + -The proxy mode is not the default connection mode and must be configured. -Proxy mode has the same <> as sniff mode. +Proxy mode is not the default connection mode, so you must configure it +explicitly if desired. Proxy mode has the same <> as sniff mode. 
include::cluster/remote-clusters-api-key.asciidoc[] From fd37ef88c28744181d4628a05baed57098884bd9 Mon Sep 17 00:00:00 2001 From: Vishal Raj Date: Thu, 22 Aug 2024 00:12:24 +0100 Subject: [PATCH 133/389] [plugin/apm-data] Set fallback to legacy ILM policies (#112028) --- .../resources/index-templates/logs-apm.app@template.yaml | 3 +++ .../resources/index-templates/logs-apm.error@template.yaml | 3 +++ .../resources/index-templates/metrics-apm.app@template.yaml | 3 +++ .../index-templates/metrics-apm.internal@template.yaml | 3 +++ .../metrics-apm.service_destination.10m@template.yaml | 3 +++ .../metrics-apm.service_destination.1m@template.yaml | 3 +++ .../metrics-apm.service_destination.60m@template.yaml | 3 +++ .../metrics-apm.service_summary.10m@template.yaml | 3 +++ .../metrics-apm.service_summary.1m@template.yaml | 3 +++ .../metrics-apm.service_summary.60m@template.yaml | 3 +++ .../metrics-apm.service_transaction.10m@template.yaml | 3 +++ .../metrics-apm.service_transaction.1m@template.yaml | 3 +++ .../metrics-apm.service_transaction.60m@template.yaml | 3 +++ .../metrics-apm.transaction.10m@template.yaml | 3 +++ .../index-templates/metrics-apm.transaction.1m@template.yaml | 3 +++ .../metrics-apm.transaction.60m@template.yaml | 3 +++ .../resources/index-templates/traces-apm.rum@template.yaml | 3 +++ .../index-templates/traces-apm.sampled@template.yaml | 5 +++++ .../main/resources/index-templates/traces-apm@template.yaml | 3 +++ x-pack/plugin/apm-data/src/main/resources/resources.yaml | 2 +- 20 files changed, 60 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index 21cad50f3fe90..f74f1aa2e900e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -23,3 +23,6 @@ template: index: default_pipeline: logs-apm.app@default-pipeline final_pipeline: apm@pipeline + lifecycle: + name: logs-apm.app_logs-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 2cfa7b454722f..0ab9f01a76c5c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -30,3 +30,6 @@ template: index: default_pipeline: logs-apm.error@default-pipeline final_pipeline: apm@pipeline + lifecycle: + name: logs-apm.error_logs-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index a3c7ab7c05193..5659a5c2cbd55 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -24,3 +24,6 @@ template: index: default_pipeline: metrics-apm.app@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.app_metrics-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 4c7df377a6cfa..8e5fca051aaeb 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -25,6 +25,9 @@ template: index: default_pipeline: metrics-apm.internal@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.internal_metrics-default_policy + prefer_ilm: false mappings: properties: data_stream.dataset: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 63c9ff9c3b988..23db583d3a30f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 6995a2d09b12e..4cbeb5053d072 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index b39d0beca3740..d29f953cb73a1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index 8d92b21866bb8..57f63b9ed7dcc 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + 
lifecycle: + name: metrics-apm.service_summary_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index de19df330aa0e..6b8e604e3f03e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_summary_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index 002676eb08cc1..1c16e20a34f51 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_summary_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index 549af3942dcd3..db85407599f67 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 9bdacfc337663..9e3220b2c4c3a 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index 8bcbeb53c74fe..c10435b2b50a6 100644 --- 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 68c1dc0f31c1e..92c6a430a377d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 6065f6e12f999..78ed0959f270f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index d8889ceb63f87..3625ecfc1458b 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index d299481ff6e21..53647284d2b91 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -25,6 +25,9 @@ template: index: default_pipeline: traces-apm.rum@default-pipeline final_pipeline: traces-apm@pipeline + lifecycle: + name: traces-apm.rum_traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml 
index 81457e2f204cb..9cffe241e0979 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -20,6 +20,11 @@ ignore_missing_component_templates: template: lifecycle: data_retention: 1h + settings: + index: + lifecycle: + name: traces-apm.sampled_traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index fda953171b793..bcf406faa71da 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml @@ -24,6 +24,9 @@ template: index: default_pipeline: traces-apm@default-pipeline final_pipeline: traces-apm@pipeline + lifecycle: + name: traces-apm.traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index fa38fda679e49..cd2111ffb9f83 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 6 +version: 7 component-templates: # Data lifecycle. From 7759b553b53a657da3d19bcb3d84070309357aee Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 22 Aug 2024 09:31:51 +0700 Subject: [PATCH 134/389] Improve zstd test coverage by adding a test duel (#112048) Adding test duel between out of the box stored fields codec and zstd stored field codecs: - lz4 compared to zstd level 0 (best speed) - deflate compared to zstd level 3 (best compression) Relates #108706 --- .../codec/zstd/StoredFieldCodecDuelTests.java | 109 ++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java diff --git a/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java new file mode 100644 index 0000000000000..93e9911746d18 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.zstd; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.tests.index.ForceMergePolicy; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.codec.LegacyPerFieldMapperCodec; +import org.elasticsearch.index.codec.PerFieldMapperCodec; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class StoredFieldCodecDuelTests extends ESTestCase { + + private static final String STRING_FIELD = "string_field_1"; + private static final String BINARY_FIELD = "binary_field_2"; + private static final String INT_FIELD = "int_field_3"; + private static final String LONG_FIELD = "long_field_4"; + private static final String FLOAT_FIELD = "float_field_5"; + private static final String DOUBLE_FIELD = "double_field_5"; + + public void testDuelBestSpeed() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + public void testDuelBestCompression() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + static void doTestDuel(Codec baselineCodec, Codec contenderCodec) throws IOException { + try (var baselineDirectory = newDirectory(); var contenderDirectory = newDirectory()) { + int numDocs = randomIntBetween(256, 8096); + + var mergePolicy = new ForceMergePolicy(newLogMergePolicy()); + var baselineConfig = newIndexWriterConfig(); + baselineConfig.setMergePolicy(mergePolicy); + baselineConfig.setCodec(baselineCodec); + var contenderConf = newIndexWriterConfig(); + contenderConf.setCodec(contenderCodec); + contenderConf.setMergePolicy(mergePolicy); + + try ( + var baselineIw = new RandomIndexWriter(random(), baselineDirectory, baselineConfig); + var contenderIw = new RandomIndexWriter(random(), contenderDirectory, contenderConf) + ) { + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StoredField(STRING_FIELD, randomAlphaOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(BINARY_FIELD, randomByteArrayOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(INT_FIELD, randomInt())); + doc.add(new StoredField(LONG_FIELD, randomLong())); + doc.add(new StoredField(FLOAT_FIELD, randomFloat())); + doc.add(new StoredField(DOUBLE_FIELD, randomDouble())); + baselineIw.addDocument(doc); + contenderIw.addDocument(doc); + } + baselineIw.forceMerge(1); + contenderIw.forceMerge(1); + } + try (var baselineIr = DirectoryReader.open(baselineDirectory); var contenderIr = DirectoryReader.open(contenderDirectory)) { + assertEquals(1, baselineIr.leaves().size()); + assertEquals(1, contenderIr.leaves().size()); + + var baseLeafReader = baselineIr.leaves().get(0).reader(); + var contenderLeafReader = contenderIr.leaves().get(0).reader(); +
assertEquals(baseLeafReader.maxDoc(), contenderLeafReader.maxDoc()); + + for (int docId = 0; docId < contenderLeafReader.maxDoc(); docId++) { + Document baselineDoc = baseLeafReader.storedFields().document(docId); + Document contenderDoc = contenderLeafReader.storedFields().document(docId); + assertThat(contenderDoc.getFields().size(), equalTo(baselineDoc.getFields().size())); + for (int i = 0; i < baselineDoc.getFields().size(); i++) { + var baselineField = baselineDoc.getFields().get(i); + var contenderField = contenderDoc.getFields().get(i); + assertThat(contenderField.name(), equalTo(baselineField.name())); + switch (baselineField.name()) { + case STRING_FIELD -> assertThat(contenderField.stringValue(), equalTo(baselineField.stringValue())); + case BINARY_FIELD -> assertThat(contenderField.binaryValue(), equalTo(baselineField.binaryValue())); + case INT_FIELD, LONG_FIELD, FLOAT_FIELD, DOUBLE_FIELD -> assertThat( + contenderField.numericValue(), + equalTo(baselineField.numericValue()) + ); + default -> fail("unexpected field [" + baselineField.name() + "]"); + } + } + } + } + } + } + +} From a226826786d27d2ed16cc509dd28d454f58ee3d1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 22 Aug 2024 14:28:36 +1000 Subject: [PATCH 135/389] Mute org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests testDuel #112082 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f480938c24a13..18c04c774d487 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -176,6 +176,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT method: testForceSleepsProfile {ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112049 +- class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests + method: testDuel + issue: https://github.com/elastic/elasticsearch/issues/112082 # Examples: # From 158901577d27b426543cf1a30a7b9bd6cf93a0ba Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 22 Aug 2024 15:09:35 +1000 Subject: [PATCH 136/389] Mute org.elasticsearch.xpack.inference.InferenceRestIT test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} #111999 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 18c04c774d487..96fa1c674a27e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests method: testDuel issue: https://github.com/elastic/elasticsearch/issues/112082 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} + issue: https://github.com/elastic/elasticsearch/issues/111999 # Examples: # From 10d665ba6bbbc007b42c2c85c6470323555c8be9 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Thu, 22 Aug 2024 08:10:12 +0200 Subject: [PATCH 137/389] Collect APM metrics for failure stores (#108279) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds APM metrics for failure stores. See the JavaDoc comments in `FailureStoreMetrics.java` for a detailed explanation on the individual metrics. 
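To make the counter semantics concrete, here is a minimal sketch against the
FailureStoreMetrics API introduced by this change (the data stream name and
error type below are illustrative placeholders, not values taken from the
patch; the constructor, counter methods, and MeterRegistry.NOOP are as added
in FailureStoreMetrics.java):

    import org.elasticsearch.action.bulk.FailureStoreMetrics;
    import org.elasticsearch.telemetry.metric.MeterRegistry;

    public class FailureStoreMetricsSketch {
        public static void main(String[] args) {
            // NOOP meter registry: records nothing, but exercises the same API.
            FailureStoreMetrics metrics = new FailureStoreMetrics(MeterRegistry.NOOP);
            // Every document sent to a data stream is counted once in the total.
            metrics.incrementTotal("logs-app-default");
            // A document that fails at the shard level and is redirected to the
            // failure store additionally increments the failure store counter.
            metrics.incrementFailureStore(
                "logs-app-default", "mapper_parsing_exception", FailureStoreMetrics.ErrorLocation.SHARD);
            // If the redirected write fails too, the document is lost for good and
            // is counted as rejected; failureStore=true records that the rejection
            // happened while writing to the failure store itself.
            metrics.incrementRejected(
                "logs-app-default", "mapper_parsing_exception", FailureStoreMetrics.ErrorLocation.SHARD, true);
        }
    }

So for a single document the total counter fires exactly once, while the
failure store and rejected counters may both fire for the same underlying
failure.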
--- .../IngestFailureStoreMetricsIT.java | 428 ++++++++++++++++++ .../action/bulk/BulkOperation.java | 90 ++-- .../action/bulk/FailureStoreMetrics.java | 98 ++++ .../bulk/TransportAbstractBulkAction.java | 8 +- .../action/bulk/TransportBulkAction.java | 100 ++-- .../bulk/TransportSimulateBulkAction.java | 4 +- .../cluster/metadata/DataStream.java | 19 + .../elasticsearch/ingest/IngestService.java | 48 +- .../elasticsearch/node/NodeConstruction.java | 6 +- .../action/bulk/BulkOperationTests.java | 3 +- ...ActionIndicesThatCannotBeCreatedTests.java | 3 +- .../bulk/TransportBulkActionIngestTests.java | 14 +- .../action/bulk/TransportBulkActionTests.java | 21 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../ingest/ReservedPipelineActionTests.java | 4 +- .../ingest/IngestServiceTests.java | 33 +- .../ingest/SimulateIngestServiceTests.java | 17 +- .../snapshots/SnapshotResiliencyTests.java | 7 +- ...sportGetTrainedModelsStatsActionTests.java | 4 +- 19 files changed, 781 insertions(+), 129 deletions(-) create mode 100644 modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java create mode 100644 server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java new file mode 100644 index 0000000000000..a52016e8c7f0b --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +/** + * An integration test that verifies how different paths/scenarios affect the APM metrics for failure stores. 
+ */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.SUITE) +public class IngestFailureStoreMetricsIT extends ESIntegTestCase { + + private static final List METRICS = List.of( + FailureStoreMetrics.METRIC_TOTAL, + FailureStoreMetrics.METRIC_FAILURE_STORE, + FailureStoreMetrics.METRIC_REJECTED + ); + + private String template; + private String dataStream; + private String pipeline; + + @Before + public void initializeRandomNames() { + template = "template-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + dataStream = "data-stream-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + pipeline = "pipeline-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + logger.info( + "--> running [{}] with generated names data stream [{}], template [{}] and pipeline [{}]", + getTestName(), + dataStream, + template, + pipeline + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class, CustomIngestTestPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class); + } + + public void testNoPipelineNoFailures() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testFailingPipelineNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE, + false + ); + } + + public void testFailingPipelineWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testShardFailureNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = 
collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + false + ); + } + + public void testShardFailureWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + /** + * Make sure the rejected counter gets incremented when there were shard-level failures while trying to redirect a document to the + * failure store. + */ + public void testRejectionFromFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + // Initialize failure store. + var rolloverRequest = new RolloverRequest(dataStream, null); + rolloverRequest.setIndicesOptions( + IndicesOptions.builder(rolloverRequest.indicesOptions()) + .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .build() + ); + var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); + var failureStoreIndex = rolloverResponse.getNewIndex(); + // Add a write block to the failure store index, which causes shard-level "failures". + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.WRITE, failureStoreIndex); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + true + ); + } + + /** + * Make sure metrics get the correct data_stream attribute after a reroute. 
+ */ + public void testRerouteSuccessfulCorrectName() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + String destination = dataStream + "-destination"; + final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + createReroutePipeline(destination); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, destination); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDropping() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("drop"); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDataStreamAlias() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + var indicesAliasesRequest = new IndicesAliasesRequest(); + indicesAliasesRequest.addAliasAction( + IndicesAliasesRequest.AliasActions.add().alias("some-alias").index(dataStream).writeIndex(true) + ); + client().execute(TransportIndicesAliasesAction.TYPE, indicesAliasesRequest).actionGet(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs("some-alias", nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + private void putComposableIndexTemplate(boolean failureStore) throws IOException { + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(template); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, failureStore)) + .template(new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "count": { + "type": "long" + } + } + }"""), null)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + } + + private void createDataStream() { + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + } + + private void createBasicPipeline(String processorType) { + createPipeline(Strings.format("\"%s\": {}", processorType)); + } + + private void createReroutePipeline(String destination) { + createPipeline(Strings.format("\"reroute\": {\"destination\": \"%s\"}", destination)); + } + + private void createPipeline(String processor) { + String pipelineDefinition = Strings.format("{\"processors\": [{%s}]}", processor); + BytesReference 
bytes = new BytesArray(pipelineDefinition); + clusterAdmin().putPipeline(new PutPipelineRequest(pipeline, bytes, XContentType.JSON)).actionGet(); + } + + private void indexDocs(String dataStream, int numDocs, String pipeline) { + indexDocs(dataStream, numDocs, "1", pipeline); + } + + private void indexDocs(String dataStream, int numDocs, String value, String pipeline) { + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < numDocs; i++) { + String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON) + .setPipeline(pipeline) + ); + } + client().bulk(bulkRequest).actionGet(); + } + + private static Map> collectTelemetry() { + Map> measurements = new HashMap<>(); + for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) { + final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow(); + + telemetryPlugin.collect(); + + for (String metricName : METRICS) { + measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName)); + } + } + return measurements; + } + + private void assertMeasurements(List measurements, int expectedSize, String expectedDataStream) { + assertMeasurements(measurements, expectedSize, expectedDataStream, (Consumer) null); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location + ) { + assertMeasurements( + measurements, + expectedSize, + expectedDataStream, + measurement -> assertEquals(location.name(), measurement.attributes().get("error_location")) + ); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location, + boolean failureStore + ) { + assertMeasurements(measurements, expectedSize, expectedDataStream, measurement -> { + assertEquals(location.name(), measurement.attributes().get("error_location")); + assertEquals(failureStore, measurement.attributes().get("failure_store")); + }); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + Consumer customAssertion + ) { + assertEquals(expectedSize, measurements.size()); + for (Measurement measurement : measurements) { + assertEquals(expectedDataStream, measurement.attributes().get("data_stream")); + if (customAssertion != null) { + customAssertion.accept(measurement); + } + } + } + + public static class CustomIngestTestPlugin extends IngestTestPlugin { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put( + "drop", + (factories, tag, description, config) -> new TestProcessor(tag, "drop", description, ingestDocument -> null) + ); + processors.put("reroute", (factories, tag, description, config) -> { + String destination = (String) config.remove("destination"); + return new TestProcessor( + tag, + "reroute", + description, + (Consumer) ingestDocument -> ingestDocument.reroute(destination) + ); + }); + processors.put( + "fail", + (processorFactories, tag, description, config) -> new TestProcessor(tag, "fail", description, new RuntimeException()) + ); + return processors; + } + 
} +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 258e5b4c9a58d..813203afe42c5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -91,6 +93,7 @@ final class BulkOperation extends ActionRunnable { private final OriginSettingClient rolloverClient; private final Set failureStoresToBeRolledOver = ConcurrentCollections.newConcurrentSet(); private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); + private final FailureStoreMetrics failureStoreMetrics; BulkOperation( Task task, @@ -104,7 +107,8 @@ final class BulkOperation extends ActionRunnable { IndexNameExpressionResolver indexNameExpressionResolver, LongSupplier relativeTimeProvider, long startTimeNanos, - ActionListener listener + ActionListener listener, + FailureStoreMetrics failureStoreMetrics ) { this( task, @@ -120,7 +124,8 @@ final class BulkOperation extends ActionRunnable { startTimeNanos, listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), - new FailureStoreDocumentConverter() + new FailureStoreDocumentConverter(), + failureStoreMetrics ); } @@ -138,7 +143,8 @@ final class BulkOperation extends ActionRunnable { long startTimeNanos, ActionListener listener, ClusterStateObserver observer, - FailureStoreDocumentConverter failureStoreDocumentConverter + FailureStoreDocumentConverter failureStoreDocumentConverter, + FailureStoreMetrics failureStoreMetrics ) { super(listener); this.task = task; @@ -156,6 +162,7 @@ final class BulkOperation extends ActionRunnable { this.observer = observer; this.failureStoreDocumentConverter = failureStoreDocumentConverter; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } @Override @@ -437,17 +444,11 @@ public void onResponse(BulkShardResponse bulkShardResponse) { for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { // We zip the requests and responses together so that we can identify failed documents and potentially store them BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; if (bulkItemResponse.isFailed()) { - BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; - - DataStream failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - var cause = bulkItemResponse.getFailure().getCause(); - addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreReference.getName()); - } + processFailure(bulkItemRequest, bulkItemResponse.getFailure().getCause()); addFailure(bulkItemResponse); } else { 
bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); @@ -464,11 +465,7 @@ public void onFailure(Exception e) { final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); - DataStream failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - addDocumentToRedirectRequests(request, e, failureStoreReference.getName()); - } + processFailure(request, e); addFailure(docWriteRequest, request.id(), indexName, e); } completeShardOperation(); @@ -479,45 +476,56 @@ private void completeShardOperation() { clusterState = null; releaseOnFinish.close(); } + + private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { + var errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(cause)); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + DataStream failureStoreCandidate = getRedirectTargetCandidate(docWriteRequest, getClusterState().metadata()); + // If the candidate is not null, the BulkItemRequest targets a data stream, but we'll still have to check if + // it has the failure store enabled. + if (failureStoreCandidate != null) { + // Do not redirect documents to a failure store that were already headed to one. + var isFailureStoreDoc = docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore(); + if (isFailureStoreDoc == false && failureStoreCandidate.isFailureStoreEnabled()) { + // Redirect to failure store. + maybeMarkFailureStoreForRollover(failureStoreCandidate); + addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); + failureStoreMetrics.incrementFailureStore( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD + ); + } else { + // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled + // or this request was already targeting a failure store), we increment the rejected counter. + failureStoreMetrics.incrementRejected( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD, + isFailureStoreDoc + ); + } + } + } }); } /** - * Determines if the write request can be redirected if it fails. Write requests can be redirected IFF they are targeting a data stream - * with a failure store and are not already redirected themselves. If the document can be redirected, the data stream name to use for - * the redirection is returned. + * Tries to find a candidate redirect target for this write request. A candidate redirect target is a data stream that may or + * may not have the failure store enabled. 
* * @param docWriteRequest the write request to check * @param metadata cluster state metadata for resolving index abstractions - * @return a data stream if the write request points to a data stream that has the failure store enabled, or {@code null} if it does not + * @return a data stream if the write request points to a data stream, or {@code null} if it does not */ - private static DataStream getRedirectTarget(DocWriteRequest docWriteRequest, Metadata metadata) { + private static DataStream getRedirectTargetCandidate(DocWriteRequest docWriteRequest, Metadata metadata) { // Feature flag guard if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } - // Do not resolve a failure store for documents that were already headed to one - if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { - return null; - } // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); - if (ia == null) { - return null; - } - if (ia.isDataStreamRelated()) { - // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they - // will write to, not which _data stream_. - // We work backward to find the data stream from the concrete write index to cover this case. - Index concreteIndex = ia.getWriteIndex(); - IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); - DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. - return parentDataStream; - } - } - return null; + return DataStream.resolveDataStream(ia, metadata); } /** diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java new file mode 100644 index 0000000000000..5a36f10785790 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Map; + +/** + * A class containing APM metrics for failure stores. See the JavaDoc on the individual methods for an explanation on what they're tracking. + * General notes: + *
<ul>
+ *     <li>When a document is rerouted in a pipeline, the destination data stream is used for the metric attribute(s).</li>
+ * </ul>
    + */ +public class FailureStoreMetrics { + + public static final FailureStoreMetrics NOOP = new FailureStoreMetrics(MeterRegistry.NOOP); + + public static final String METRIC_TOTAL = "es.data_stream.ingest.documents.total"; + public static final String METRIC_FAILURE_STORE = "es.data_stream.ingest.documents.failure_store.total"; + public static final String METRIC_REJECTED = "es.data_stream.ingest.documents.rejected.total"; + + private final LongCounter totalCounter; + private final LongCounter failureStoreCounter; + private final LongCounter rejectedCounter; + + public FailureStoreMetrics(MeterRegistry meterRegistry) { + totalCounter = meterRegistry.registerLongCounter(METRIC_TOTAL, "total number of documents that were sent to a data stream", "unit"); + failureStoreCounter = meterRegistry.registerLongCounter( + METRIC_FAILURE_STORE, + "number of documents that got redirected to the failure store", + "unit" + ); + rejectedCounter = meterRegistry.registerLongCounter(METRIC_REJECTED, "number of documents that were rejected", "unit"); + } + + /** + * This counter tracks the number of documents that we tried to index into a data stream. This includes documents + * that were dropped by a pipeline. This counter will only be incremented once for every incoming document (even when it gets + * redirected to the failure store and/or gets rejected). + * @param dataStream the name of the data stream + */ + public void incrementTotal(String dataStream) { + totalCounter.incrementBy(1, Map.of("data_stream", dataStream)); + } + + /** + * This counter tracks the number of documents that we tried to store into a failure store. This includes both pipeline and + * shard-level failures. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. the name of the exception that was thrown) + * @param errorLocation where this failure occurred + */ + public void incrementFailureStore(String dataStream, String errorType, ErrorLocation errorLocation) { + failureStoreCounter.incrementBy( + 1, + Map.of("data_stream", dataStream, "error_type", errorType, "error_location", errorLocation.name()) + ); + } + + /** + * This counter tracks the number of documents that failed to get stored in Elasticsearch. Meaning, any document that did not get + * stored in the data stream or in its failure store. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. 
the name of the exception that was thrown) + * @param errorLocation where this failure occurred + * @param failureStore whether this failure occurred while trying to ingest into a failure store (true) or in the data + * stream itself (false) + */ + public void incrementRejected(String dataStream, String errorType, ErrorLocation errorLocation, boolean failureStore) { + rejectedCounter.incrementBy( + 1, + Map.of( + "data_stream", + dataStream, + "error_type", + errorType, + "error_location", + errorLocation.name(), + "failure_store", + failureStore + ) + ); + } + + public enum ErrorLocation { + PIPELINE, + SHARD; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index c44ad505aea84..74864abe3ec50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -222,7 +222,7 @@ private void processBulkIndexIngestRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, - (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), + (indexName) -> resolveFailureStore(indexName, metadata, threadPool.absoluteTimeInMillis()), bulkRequestModifier::markItemForFailureStore, bulkRequestModifier::markItemAsFailed, (originalThread, exception) -> { @@ -274,13 +274,15 @@ public boolean isForceExecution() { /** * Determines if an index name is associated with either an existing data stream or a template * for one that has the failure store enabled. + * * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. + * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a + * data stream, but it doesn't have the failure store enabled. Returns null when it doesn't correspond to a data stream. */ - protected abstract boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis); + protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); /** * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. 
Upserts are diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a695e0f5e8ab6..bdda4ff487f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; @@ -57,7 +56,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.Executor; @@ -82,6 +80,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; + private final FailureStoreMetrics failureStoreMetrics; @Inject public TransportBulkAction( @@ -94,7 +93,8 @@ public TransportBulkAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, - SystemIndices systemIndices + SystemIndices systemIndices, + FailureStoreMetrics failureStoreMetrics ) { this( threadPool, @@ -107,7 +107,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - threadPool::relativeTimeInNanos + threadPool::relativeTimeInNanos, + failureStoreMetrics ); } @@ -122,7 +123,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { this( TYPE, @@ -137,7 +139,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - relativeTimeProvider + relativeTimeProvider, + failureStoreMetrics ); } @@ -154,7 +157,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { super( bulkAction, @@ -173,6 +177,7 @@ public TransportBulkAction( this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } public static ActionListener unwrappingSingleItemBulkResponse( @@ -199,6 +204,8 @@ protected void doInternalExecute( ActionListener listener, long relativeStartTimeNanos ) { + trackIndexRequests(bulkRequest); + Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); Set failureStoresToBeRolledOver = new HashSet<>(); @@ -216,6 +223,27 @@ protected void doInternalExecute( ); } + /** + * Track the number of index requests in our APM metrics. We'll track almost all docs here (pipeline or no pipeline, + * failure store or original), but some docs don't reach this place (dropped and rejected docs), so we increment for those docs in + * different places. 
+ */ + private void trackIndexRequests(BulkRequest bulkRequest) { + final Metadata metadata = clusterService.state().metadata(); + for (DocWriteRequest request : bulkRequest.requests) { + if (request instanceof IndexRequest == false) { + continue; + } + String resolvedIndexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(resolvedIndexName); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, metadata); + // We only track index requests into data streams. + if (dataStream != null) { + failureStoreMetrics.incrementTotal(dataStream.getName()); + } + } + } + /** * Determine all the targets (i.e. indices, data streams, failure stores) that require an action before we can proceed with the bulk * request. Indices might need to be created, and data streams and failure stores might need to be rolled over when they're marked @@ -535,29 +563,29 @@ void executeBulk( indexNameExpressionResolver, relativeTimeNanosProvider, startTimeNanos, - listener + listener, + failureStoreMetrics ).run(); } /** - * Determines if an index name is associated with either an existing data stream or a template - * for one that has the failure store enabled. - * @param indexName The index name to check. - * @param metadata Cluster state metadata. - * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if the given index name corresponds to a data stream with a failure store, - * or if it matches a template that has a data stream failure store enabled. + * See {@link #resolveFailureStore(String, Metadata, long)} */ - static boolean shouldStoreFailureInternal(String indexName, Metadata metadata, long epochMillis) { - return DataStream.isFailureStoreFeatureFlagEnabled() - && resolveFailureStoreFromMetadata(indexName, metadata, epochMillis).or( - () -> resolveFailureStoreFromTemplate(indexName, metadata) - ).orElse(false); + // Visibility for testing + static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { + return null; + } + var resolution = resolveFailureStoreFromMetadata(indexName, metadata, epochMillis); + if (resolution != null) { + return resolution; + } + return resolveFailureStoreFromTemplate(indexName, metadata); } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { - return shouldStoreFailureInternal(indexName, metadata, time); + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long time) { + return resolveFailureInternal(indexName, metadata, time); } /** @@ -567,30 +595,24 @@ protected boolean shouldStoreFailure(String indexName, Metadata metadata, long t * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if the given index name corresponds to an existing data stream with a failure store enabled. 
*/ - private static Optional resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { - return Optional.empty(); + return null; } // Get index abstraction, resolving date math if it exists IndexAbstraction indexAbstraction = metadata.getIndicesLookup() .get(IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis)); - - // We only store failures if the failure is being written to a data stream, - // not when directly writing to backing indices/failure stores if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { - return Optional.empty(); + return null; } - // Locate the write index for the abstraction, and check if it has a data stream associated with it. - // This handles alias resolution as well as data stream resolution. - Index writeIndex = indexAbstraction.getWriteIndex(); - assert writeIndex != null : "Could not resolve write index for resource [" + indexName + "]"; - IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); - DataStream targetDataStream = writeAbstraction.getParentDataStream(); + // We only store failures if the failure is being written to a data stream, + // not when directly writing to backing indices/failure stores + DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return Optional.of(targetDataStream != null && targetDataStream.isFailureStoreEnabled()); + return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); } /** @@ -599,9 +621,9 @@ private static Optional resolveFailureStoreFromMetadata(String indexNam * @param metadata Cluster state metadata. * @return true if the given index name corresponds to an index template with a data stream failure store enabled. 
*/ - private static Optional resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { if (indexName == null) { - return Optional.empty(); + return null; } // Check to see if the index name matches any templates such that an index would have been attributed @@ -612,11 +634,11 @@ private static Optional resolveFailureStoreFromTemplate(String indexNam ComposableIndexTemplate composableIndexTemplate = metadata.templatesV2().get(template); if (composableIndexTemplate.getDataStreamTemplate() != null) { // Check if the data stream has the failure store enabled - return Optional.of(composableIndexTemplate.getDataStreamTemplate().hasFailureStore()); + return composableIndexTemplate.getDataStreamTemplate().hasFailureStore(); } } // Could not locate a failure store via template - return Optional.empty(); + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index a4648a7accb5a..2312a75b91084 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -166,8 +166,8 @@ protected IngestService getIngestService(BulkRequest request) { } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) { + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis) { // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store - return false; + return null; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 6b20399a1bc59..c9743c157a622 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -1376,6 +1376,25 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp } } + /** + * Resolve the index abstraction to a data stream. This handles alias resolution as well as data stream resolution. This does NOT + * resolve a data stream by providing a concrete backing index. + */ + public static DataStream resolveDataStream(IndexAbstraction indexAbstraction, Metadata metadata) { + // We do not consider concrete indices - only data streams and data stream aliases. + if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { + return null; + } + + // Locate the write index for the abstraction, and check if it has a data stream associated with it. + Index writeIndex = indexAbstraction.getWriteIndex(); + if (writeIndex == null) { + return null; + } + IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); + return writeAbstraction.getParentDataStream(); + } + /** * Modifies the passed Instant object to be used as a bound for a timestamp field in TimeSeries. It needs to be called in both backing * index construction (rollover) and index selection for doc insertion. 
Failure to do so may lead to errors due to document timestamps diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0b1a135a17214..20f97e1871483 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -18,6 +19,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -88,6 +90,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -117,6 +120,7 @@ public class IngestService implements ClusterStateApplier, ReportingService pipelines = Map.of(); private final ThreadPool threadPool; private final IngestMetric totalMetrics = new IngestMetric(); + private final FailureStoreMetrics failureStoreMetrics; private final List> ingestClusterStateListeners = new CopyOnWriteArrayList<>(); private volatile ClusterState state; @@ -190,7 +194,8 @@ public IngestService( List ingestPlugins, Client client, MatcherWatchdog matcherWatchdog, - DocumentParsingProvider documentParsingProvider + DocumentParsingProvider documentParsingProvider, + FailureStoreMetrics failureStoreMetrics ) { this.clusterService = clusterService; this.scriptService = scriptService; @@ -212,6 +217,7 @@ public IngestService( ); this.threadPool = threadPool; this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR); + this.failureStoreMetrics = failureStoreMetrics; } /** @@ -228,6 +234,7 @@ public IngestService( this.taskQueue = ingestService.taskQueue; this.pipelines = ingestService.pipelines; this.state = ingestService.state; + this.failureStoreMetrics = ingestService.failureStoreMetrics; } private static Map processorFactories(List ingestPlugins, Processor.Parameters parameters) { @@ -691,7 +698,7 @@ private static IngestPipelinesExecutionResult failAndStoreFor(String index, Exce * @param actionRequests The collection of requests to be processed. * @param onDropped A callback executed when a document is dropped by a pipeline. * Accepts the slot in the collection of requests that the document occupies. - * @param shouldStoreFailure A predicate executed on each ingest failure to determine if the + * @param resolveFailureStore A function executed on each ingest failure to determine if the * failure should be stored somewhere. * @param onStoreFailure A callback executed when a document fails ingest but the failure should * be persisted elsewhere. 
Accepts the slot in the collection of requests @@ -709,7 +716,7 @@ public void executeBulkRequest( final int numberOfActionRequests, final Iterable<DocWriteRequest<?>> actionRequests, final IntConsumer onDropped, - final Predicate<String> shouldStoreFailure, + final Function<String, Boolean> resolveFailureStore, final TriConsumer<Integer, String, Exception> onStoreFailure, final BiConsumer<Integer, Exception> onFailure, final BiConsumer<Thread, Exception> onCompletion, @@ -794,7 +801,7 @@ public void onFailure(Exception e) { } ); - executePipelines(pipelines, indexRequest, ingestDocument, shouldStoreFailure, documentListener); + executePipelines(pipelines, indexRequest, ingestDocument, resolveFailureStore, documentListener); indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); assert actionRequest.index() != null; @@ -885,7 +892,7 @@ private void executePipelines( final PipelineIterator pipelines, final IndexRequest indexRequest, final IngestDocument ingestDocument, - final Predicate<String> shouldStoreFailure, + final Function<String, Boolean> resolveFailureStore, final ActionListener<IngestPipelinesExecutionResult> listener ) { assert pipelines.hasNext(); @@ -898,9 +905,22 @@ private void executePipelines( ingestDocument.resetReroute(); final String originalIndex = indexRequest.indices()[0]; final Consumer<Exception> exceptionHandler = (Exception e) -> { - if (shouldStoreFailure.test(originalIndex)) { + String errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(e)); + // If `failureStoreResolution` is true, we store the failure. If it's false, the target is a data stream, + // but it doesn't have the failure store enabled. If it's null, the target wasn't a data stream. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null && failureStoreResolution) { + failureStoreMetrics.incrementFailureStore(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE); listener.onResponse(IngestPipelinesExecutionResult.failAndStoreFor(originalIndex, e)); } else { + if (failureStoreResolution != null) { + // If this document targeted a data stream that didn't have the failure store enabled, we increment + // the rejected counter. + // We also increment the total counter because this request will not reach the code that increments + // the total counter for non-rejected documents. + failureStoreMetrics.incrementTotal(originalIndex); + failureStoreMetrics.incrementRejected(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE, false); + } listener.onFailure(e); } }; @@ -928,6 +948,20 @@ private void executePipelines( } if (keep == false) { + // We only increment the total counter for dropped docs here, because these docs don't reach the code + // that ordinarily takes care of that. + // We reuse `resolveFailureStore` here to determine whether the index request targets a data stream, + // because we only want to track these metrics for data streams. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null) { + // Get index abstraction, resolving date math if it exists + IndexAbstraction indexAbstraction = state.metadata() + .getIndicesLookup() + .get(IndexNameExpressionResolver.resolveDateMathExpression(originalIndex, threadPool.absoluteTimeInMillis())); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, state.metadata()); + String dataStreamName = dataStream != null ?
diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
index a4db9a0a0e149..9c5b72a573d44 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction;
 import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
 import org.elasticsearch.action.ingest.ReservedPipelineAction;
 import org.elasticsearch.action.search.SearchExecutionStatsCollector;
@@ -659,6 +660,7 @@ private void construct(

         modules.bindToInstance(DocumentParsingProvider.class, documentParsingProvider);

+        FailureStoreMetrics failureStoreMetrics = new FailureStoreMetrics(telemetryProvider.getMeterRegistry());
         final IngestService ingestService = new IngestService(
             clusterService,
             threadPool,
@@ -668,7 +670,8 @@ private void construct(
             pluginsService.filterPlugins(IngestPlugin.class).toList(),
             client,
             IngestService.createGrokThreadWatchdog(environment, threadPool),
-            documentParsingProvider
+            documentParsingProvider,
+            failureStoreMetrics
         );

         SystemIndices systemIndices = createSystemIndices(settings);
@@ -1154,6 +1157,7 @@ record PluginServiceInstances(
             b.bind(FileSettingsService.class).toInstance(fileSettingsService);
             b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions);
             b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService);
+            b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics);
         });

         if (ReadinessService.enabled(environment)) {
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java
index 76bf8dc79b855..e950901a538b4 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java
@@ -1164,7 +1164,8 @@ private BulkOperation newBulkOperation(
             timeZero,
             listener,
             observer,
-            failureStoreDocumentConverter
+            failureStoreDocumentConverter,
+            FailureStoreMetrics.NOOP
         );
     }

diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 4ca4e7158e454..1d3d514da13a3 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -130,7 +130,8 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state)
             mock(ActionFilters.class),
             indexNameExpressionResolver,
             new IndexingPressure(Settings.EMPTY),
-            EmptySystemIndices.INSTANCE
+            EmptySystemIndices.INSTANCE,
+            FailureStoreMetrics.NOOP
         ) {
             @Override
             void executeBulk(
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index 3683c2c271739..609237f268807 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -69,7 +69,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiConsumer;
-import java.util.function.Predicate;
+import java.util.function.Function;

 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.sameInstance;
@@ -110,7 +110,7 @@ public class TransportBulkActionIngestTests extends ESTestCase {

     /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */
     @Captor
-    ArgumentCaptor<Predicate<String>> redirectPredicate;
+    ArgumentCaptor<Function<String, Boolean>> redirectPredicate;
     @Captor
     ArgumentCaptor<TriConsumer<Integer, String, Exception>> redirectHandler;
     @Captor
@@ -155,7 +155,8 @@ class TestTransportBulkAction extends TransportBulkAction {
                 new ActionFilters(Collections.emptySet()),
                 TestIndexNameExpressionResolver.newInstance(),
                 new IndexingPressure(SETTINGS),
-                EmptySystemIndices.INSTANCE
+                EmptySystemIndices.INSTANCE,
+                FailureStoreMetrics.NOOP
             );
         }

@@ -410,9 +411,10 @@ public void testIngestLocal() throws Exception {
         Iterator<DocWriteRequest<?>> req = bulkDocsItr.getValue().iterator();
         failureHandler.getValue().accept(0, exception); // have an exception for our one index request
         indexRequest2.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing
-        assertTrue(redirectPredicate.getValue().test(WITH_FAILURE_STORE_ENABLED + "-1")); // ensure redirects on failure store data stream
-        assertFalse(redirectPredicate.getValue().test(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices
-        assertFalse(redirectPredicate.getValue().test("index")); // no redirects for non-existant indices with no templates
+        // ensure redirects on failure store data stream
+        assertTrue(redirectPredicate.getValue().apply(WITH_FAILURE_STORE_ENABLED + "-1"));
+        assertNull(redirectPredicate.getValue().apply(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices
+        assertNull(redirectPredicate.getValue().apply("index")); // no redirects for non-existent indices with no templates
         redirectHandler.getValue().apply(2, WITH_FAILURE_STORE_ENABLED + "-1", exception); // exception and redirect for request 3 (slot 2)
         completionHandler.getValue().accept(DUMMY_WRITE_THREAD, null); // all ingestion completed
         assertTrue(action.isExecuted);
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
index db3a985c00ad0..ed7cc93f0ab43 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
@@ -71,6 +71,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
 import static org.junit.Assume.assumeThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
@@ -103,7 +104,8 @@ class TestTransportBulkAction extends TransportBulkAction {
                 new ActionFilters(Collections.emptySet()),
                 new Resolver(),
                 new IndexingPressure(Settings.EMPTY),
-                EmptySystemIndices.INSTANCE
+                EmptySystemIndices.INSTANCE,
+                FailureStoreMetrics.NOOP
             );
         }

@@ -417,13 +419,16 @@ public void testResolveFailureStoreFromMetadata() throws Exception {
             .build();

         // Data stream with failure store should store failures
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true));
+        assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true));
         // Data stream without failure store should not
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false));
+        assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false));
         // An index should not be considered for failure storage
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(false));
+        assertThat(TransportBulkAction.resolveFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(nullValue()));
         // even if that index is itself a failure store
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false));
+        assertThat(
+            TransportBulkAction.resolveFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime),
+            is(nullValue())
+        );
     }

     public void testResolveFailureStoreFromTemplate() throws Exception {
@@ -454,11 +459,11 @@ public void testResolveFailureStoreFromTemplate() throws Exception {
             .build();

         // Data stream with failure store should store failures
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true));
+        assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true));
         // Data stream without failure store should not
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false));
+        assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false));
         // An index template should not be considered for failure storage
-        assertThat(TransportBulkAction.shouldStoreFailureInternal(indexTemplate + "-1", metadata, testTime), is(false));
+        assertThat(TransportBulkAction.resolveFailureInternal(indexTemplate + "-1", metadata, testTime), is(nullValue()));
     }

     private BulkRequest buildBulkRequest(List<String> indices) {
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
index 09513351652b8..626f07fe61216 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
@@ -254,7 +254,8 @@ static class TestTransportBulkAction extends TransportBulkAction {
             indexNameExpressionResolver,
             new IndexingPressure(Settings.EMPTY),
             EmptySystemIndices.INSTANCE,
-            relativeTimeProvider
+            relativeTimeProvider,
+            FailureStoreMetrics.NOOP
         );
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java
index aea3359e18bf6..b620495472e28 100644
--- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -90,7 +91,8 @@ public void setup() {
             Collections.singletonList(DUMMY_PLUGIN),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
         Map<String, Processor.Factory> factories = ingestService.getProcessorFactories();
         assertTrue(factories.containsKey("set"));
diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
index bc81614c9e237..5c07c2344cf13 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.action.bulk.TransportBulkAction;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
@@ -88,9 +89,9 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.function.IntConsumer;
 import java.util.function.LongSupplier;
-import java.util.function.Predicate;
 import java.util.stream.Collectors;

 import static org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils.executeAndAssertSuccessful;
@@ -152,7 +153,8 @@ public void testIngestPlugin() {
             List.of(DUMMY_PLUGIN),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
         Map<String, Processor.Factory> factories = ingestService.getProcessorFactories();
         assertTrue(factories.containsKey("foo"));
@@ -172,7 +174,8 @@ public void testIngestPluginDuplicate() {
                 List.of(DUMMY_PLUGIN, DUMMY_PLUGIN),
                 client,
                 null,
-                DocumentParsingProvider.EMPTY_INSTANCE
+                DocumentParsingProvider.EMPTY_INSTANCE,
+                FailureStoreMetrics.NOOP
             )
         );
         assertTrue(e.getMessage(), e.getMessage().contains("already registered"));
@@ -189,7 +192,8 @@ public void testExecuteIndexPipelineDoesNotExist() {
             List.of(DUMMY_PLUGIN),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
         final IndexRequest indexRequest = new IndexRequest("_index").id("_id")
             .source(Map.of())
@@ -1665,7 +1669,7 @@ public void testExecuteFailureRedirection() throws Exception {
             .setFinalPipeline("_id2");
         doThrow(new RuntimeException()).when(processor)
             .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any());
-        final Predicate<String> redirectCheck = (idx) -> indexRequest.index().equals(idx);
+        final Function<String, Boolean> redirectCheck = (idx) -> indexRequest.index().equals(idx);
         @SuppressWarnings("unchecked")
         final TriConsumer<Integer, String, Exception> redirectHandler = mock(TriConsumer.class);
         @SuppressWarnings("unchecked")
@@ -1722,7 +1726,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception
             .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any());
         doThrow(new RuntimeException()).when(processor)
             .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any());
-        final Predicate<String> redirectPredicate = (idx) -> indexRequest.index().equals(idx);
+        final Function<String, Boolean> redirectCheck = (idx) -> indexRequest.index().equals(idx);
         @SuppressWarnings("unchecked")
         final TriConsumer<Integer, String, Exception> redirectHandler = mock(TriConsumer.class);
         @SuppressWarnings("unchecked")
@@ -1733,7 +1737,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception
             1,
             List.of(indexRequest),
             indexReq -> {},
-            redirectPredicate,
+            redirectCheck,
             redirectHandler,
             failureHandler,
             completionHandler,
@@ -1826,9 +1830,9 @@ public void testBulkRequestExecution() throws Exception {
         for (int i = 0; i < numRequest; i++) {
             IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none");
             indexRequest.source(xContentType, "field1", "value1");
-            boolean shouldListExecutedPipelines = randomBoolean();
-            executedPipelinesExpected.add(shouldListExecutedPipelines);
-            indexRequest.setListExecutedPipelines(shouldListExecutedPipelines);
+            boolean shouldListExecutedPiplines = randomBoolean();
+            executedPipelinesExpected.add(shouldListExecutedPiplines);
+            indexRequest.setListExecutedPipelines(shouldListExecutedPiplines);
             bulkRequest.add(indexRequest);
         }

@@ -2320,7 +2324,8 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet
             List.of(testPlugin),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
         ingestService.addIngestClusterStateListener(ingestClusterStateListener);

@@ -2675,7 +2680,8 @@ private void testUpdatingPipeline(String pipelineString) throws Exception {
             List.of(DUMMY_PLUGIN),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
         ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, clusterState));

@@ -2974,7 +2980,8 @@ public Map<String, Processor.Factory> getProcessors(final Processor.Parameters p
             }),
             client,
             null,
-            documentParsingProvider
+            documentParsingProvider,
+            FailureStoreMetrics.NOOP
         );
         if (randomBoolean()) {
             /*
diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java
index 30145ab37c322..18f66676cfd1f 100644
--- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java
@@ -8,6 +8,7 @@

 package org.elasticsearch.ingest;

+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.action.bulk.SimulateBulkRequest;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.service.ClusterService;
@@ -115,11 +116,23 @@ private static IngestService createWithProcessors(Map
         ThreadPool threadPool = mock(ThreadPool.class);
         when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE);
         when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE);
-        return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() {
+        var ingestPlugin = new IngestPlugin() {
             @Override
             public Map<String, Processor.Factory> getProcessors(final Processor.Parameters parameters) {
                 return processors;
             }
-        }), client, null, DocumentParsingProvider.EMPTY_INSTANCE);
+        };
+        return new IngestService(
+            mock(ClusterService.class),
+            threadPool,
+            null,
+            null,
+            null,
+            List.of(ingestPlugin),
+            client,
+            null,
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
+        );
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index b54a786e05c9d..c6086a8259fbb 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -45,6 +45,7 @@
 import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.action.bulk.TransportBulkAction;
 import org.elasticsearch.action.bulk.TransportShardBulkAction;
 import org.elasticsearch.action.index.IndexRequest;
@@ -2395,14 +2396,16 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() {
                         Collections.emptyList(),
                         client,
                         null,
-                        DocumentParsingProvider.EMPTY_INSTANCE
+                        DocumentParsingProvider.EMPTY_INSTANCE,
+                        FailureStoreMetrics.NOOP
                     ),
                     mockFeatureService,
                     client,
                     actionFilters,
                     indexNameExpressionResolver,
                     new IndexingPressure(settings),
-                    EmptySystemIndices.INSTANCE
+                    EmptySystemIndices.INSTANCE,
+                    FailureStoreMetrics.NOOP
                 )
             );
             final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction(
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java
index f10df86cc23ae..9232d32e40a97 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java
@@ -8,6 +8,7 @@

 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -136,7 +137,8 @@ public void setUpVariables() {
             Collections.singletonList(SKINNY_INGEST_PLUGIN),
             client,
             null,
-            DocumentParsingProvider.EMPTY_INSTANCE
+            DocumentParsingProvider.EMPTY_INSTANCE,
+            FailureStoreMetrics.NOOP
         );
     }

From fe786b7b8f4a13e19751b1460b6f4fef90891156 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Thu, 22 Aug 2024 09:07:42 +0200
Subject: [PATCH 138/389] Force implementing bulk InputStream#read on
 StreamInput (#112072)

We should enforce overriding here to avoid extremely slow byte-by-byte reads
when using these instances as `InputStream`.
I only found one case where this matters practically in the codebase but it's
probably good to guard against it.
---
 .../common/io/stream/ByteArrayStreamInput.java            | 7 +++++++
 .../common/io/stream/FilterStreamInput.java               | 5 +++++
 .../org/elasticsearch/common/io/stream/StreamInput.java   | 4 ++++
 .../index/translog/BufferedChecksumStreamInput.java       | 9 +++++++++
 .../elasticsearch/common/io/stream/StreamInputTests.java  | 9 +++++++++
 .../xpack/eql/execution/sample/CircuitBreakerTests.java   | 5 +++++
 6 files changed, 39 insertions(+)

diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
index 52eee5af3f6f5..838f2998d339f 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
@@ -117,4 +117,11 @@ public void readBytes(byte[] b, int offset, int len) {
         System.arraycopy(bytes, pos, b, offset, len);
         pos += len;
     }
+
+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        int toRead = Math.min(len, available());
+        readBytes(b, off, toRead);
+        return toRead;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java
index c0ef0e0abf39b..b84c67bd8c8a2 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java
@@ -97,6 +97,11 @@ public int read() throws IOException {
         return delegate.read();
     }

+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        return delegate.read(b, off, len);
+    }
+
     @Override
     public void close() throws IOException {
         delegate.close();
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index c4c18cfd376ad..ec0edb2d07e5a 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -104,6 +104,10 @@ public void setTransportVersion(TransportVersion version) {
      */
     public abstract void readBytes(byte[] b, int offset, int len) throws IOException;

+    // force implementing bulk reads to avoid accidentally slow implementations
+    @Override
+    public abstract int read(byte[] b, int off, int len) throws IOException;
+
     /**
      * Reads a bytes reference from this stream, copying any bytes read to a new {@code byte[]}. Use {@link #readReleasableBytesReference()}
      * when reading large bytes references where possible top avoid needless allocations and copying.
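[Editor's illustration -- not part of the patch.] The slow path this change guards against comes from `java.io.InputStream`, whose default bulk `read(byte[], int, int)` loops over the single-byte `read()`. A minimal standalone sketch (`ByteByByteStream` is an invented class, not Elasticsearch code):

```java
import java.io.InputStream;

class ByteByByteStream extends InputStream {
    private final byte[] data;
    private int pos;

    ByteByByteStream(byte[] data) {
        this.data = data;
    }

    @Override
    public int read() {
        return pos < data.length ? (data[pos++] & 0xFF) : -1;
    }

    // Without this override, InputStream#read(byte[], int, int) calls read()
    // once per byte -- exactly the accidental slowness the new abstract
    // override on StreamInput forces subclasses to avoid.
    @Override
    public int read(byte[] b, int off, int len) {
        if (pos >= data.length) {
            return len == 0 ? 0 : -1;
        }
        int n = Math.min(len, data.length - pos);
        System.arraycopy(data, pos, b, off, n);
        pos += n;
        return n;
    }
}
```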
diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java
index 6d1456040c8fa..9420d923107e1 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java
@@ -66,6 +66,15 @@ public void readBytes(byte[] b, int offset, int len) throws IOException {
         digest.update(b, offset, len);
     }

+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        int read = delegate.read(b, off, len);
+        if (read > 0) {
+            digest.update(b, off, read);
+        }
+        return read;
+    }
+
     private static final ThreadLocal<byte[]> buffer = ThreadLocal.withInitial(() -> new byte[8]);

     @Override
diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java
index 645461778f637..cda1f9b0e29de 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java
@@ -24,6 +24,15 @@ public class StreamInputTests extends ESTestCase {

     private StreamInput in = Mockito.spy(StreamInput.class);
+
+    {
+        try {
+            Mockito.when(in.skip(anyLong())).thenAnswer(a -> a.getArguments()[0]);
+        } catch (IOException e) {
+            throw new AssertionError(e);
+        }
+    }
+
     byte[] bytes = "0123456789".getBytes(UTF_8);

     public void testCalculateByteLengthOfAscii() throws IOException {
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
index 943d1275364fb..1652495197fc0 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
@@ -260,6 +260,11 @@ public void readBytes(byte[] b, int offset, int len) throws IOException {

         }

+        @Override
+        public int read(byte[] b, int off, int len) throws IOException {
+            return 0;
+        }
+
         @Override
         public void close() throws IOException {

From 967af10d58ac673645da64d9b37e23645fe45daf Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 22 Aug 2024 00:08:38 -0700
Subject: [PATCH 139/389] Fix DocValuesCodecDuelTests testDuel (#112084)

We need to check the returned doc id from advance() before accessing the
values of the current document.

Closes #112082
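[Editor's illustration -- not part of the patch.] The bug pattern being fixed: Lucene's `DocIdSetIterator#advance` can return `NO_MORE_DOCS`, and per-document values must not be read once the iterator is exhausted. A sketch of the guard using the public Lucene API (`advanceSafely` is a made-up helper name):

```java
import org.apache.lucene.search.DocIdSetIterator;

import java.io.IOException;

class AdvanceGuard {
    // Returns the doc id at or after `target`, or -1 when the iterator is
    // exhausted; callers only touch per-document values for a non-negative result.
    static int advanceSafely(DocIdSetIterator iterator, int target) throws IOException {
        int doc = iterator.advance(target);
        return doc == DocIdSetIterator.NO_MORE_DOCS ? -1 : doc;
    }
}
```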
---
 muted-tests.yml                                        | 3 ---
 .../index/codec/tsdb/DocValuesCodecDuelTests.java      | 3 +++
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 96fa1c674a27e..bb13c2fc9a571 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -176,9 +176,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT
   method: testForceSleepsProfile {ASYNC}
   issue: https://github.com/elastic/elasticsearch/issues/112049
-- class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests
-  method: testDuel
-  issue: https://github.com/elastic/elasticsearch/issues/112082
 - class: org.elasticsearch.xpack.inference.InferenceRestIT
   method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results}
   issue: https://github.com/elastic/elasticsearch/issues/111999
diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java
index 9b58e785131c9..20ae59e113c33 100644
--- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java
+++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java
@@ -141,6 +141,9 @@ private void assertSortedDocValues(LeafReader baselineReader, LeafReader contend
         for (int i = 0; i < docIdsToAdvanceTo.length; i++) {
             int docId = docIdsToAdvanceTo[i];
             int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender);
+            if (baselineTarget == NO_MORE_DOCS) {
+                break;
+            }
             assertEquals(baseline.ordValue(), contender.ordValue());
             assertEquals(baseline.lookupOrd(baseline.ordValue()), contender.lookupOrd(contender.ordValue()));
             i = shouldSkipDocIds(i, docId, baselineTarget, docIdsToAdvanceTo);

From 31cdc432869f4b53890d939c942270cb9eb77030 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Thu, 22 Aug 2024 18:08:41 +1000
Subject: [PATCH 140/389] Mute org.elasticsearch.xpack.ml.integration.MlJobIT
 testDeleteJobAfterMissingIndex #112088

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index bb13c2fc9a571..cd484b1c46867 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -179,6 +179,9 @@ tests:
 - class: org.elasticsearch.xpack.inference.InferenceRestIT
   method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results}
   issue: https://github.com/elastic/elasticsearch/issues/111999
+- class: org.elasticsearch.xpack.ml.integration.MlJobIT
+  method: testDeleteJobAfterMissingIndex
+  issue: https://github.com/elastic/elasticsearch/issues/112088

 # Examples:
 #

From fb6c5a55dd7a7973841307545a204b4bfd35672a Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Thu, 22 Aug 2024 12:15:22 +0200
Subject: [PATCH 141/389] Update Gradle wrapper to 8.10 (#111736)

---
 .../gradle/wrapper/gradle-wrapper.properties |   4 ++--
 .../src/main/resources/minimumGradleVersion  |   2 +-
 gradle/wrapper/gradle-wrapper.jar            | Bin 43504 -> 43583 bytes
 gradle/wrapper/gradle-wrapper.properties     |   4 ++--
 .../gradle/wrapper/gradle-wrapper.properties |   4 ++--
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
index efe2ff3449216..9036682bf0f0c 100644
--- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
+++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion
index f7b1c8ff61774..8d04a0f38fab0 100644
--- a/build-tools-internal/src/main/resources/minimumGradleVersion
+++ b/build-tools-internal/src/main/resources/minimumGradleVersion
@@ -1 +1 @@
-8.9
\ No newline at end of file
+8.10
\ No newline at end of file
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 2c3521197d7c4586c843d1d3e9090525f1898cde..a4b76b9530d66f5e68d973ea569d8e19de379189 100644
GIT binary patch
[binary deltas for gradle-wrapper.jar (delta 3990 / delta 43583) omitted: not representable as text]
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index efe2ff3449216..9036682bf0f0c 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
index efe2ff3449216..9036682bf0f0c 100644
--- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties
+++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME

From 62305f018be973676fb65c26254b489dec3b6c89 Mon Sep 17 00:00:00 2001
From: kosabogi <105062005+kosabogi@users.noreply.github.com>
Date: Thu, 22 Aug 2024 12:22:32 +0200
Subject: [PATCH 142/389] Updates-warning-about-mounting-snapshots (#112057)

* Updates-warning-about-mounting-snapshots

* Update docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc

Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com>

---------

Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com>
---
 .../searchable-snapshots/apis/mount-snapshot.asciidoc | 5 ++++-
 docs/reference/searchable-snapshots/index.asciidoc    | 9 ++++++---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
index 5d838eb86dcf3..b47bc2370ab10 100644
--- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
+++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
@@ -24,7 +24,10 @@ For more information, see <>.
 ==== {api-description-title}

 This API mounts a snapshot as a searchable snapshot index.
-Note that manually mounting {ilm-init}-managed snapshots can <> with <>.
+
+Don't use this API for snapshots managed by {ilm-init}. Manually mounting
+{ilm-init}-managed snapshots can <> with
+<>.

 [[searchable-snapshots-api-mount-path-params]]
 ==== {api-path-parms-title}
diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc
index a8a9ef36dc9a6..a38971a0bae6a 100644
--- a/docs/reference/searchable-snapshots/index.asciidoc
+++ b/docs/reference/searchable-snapshots/index.asciidoc
@@ -176,9 +176,12 @@ nodes that have a shared cache.
 ====
 Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init})
 policy can interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss
-or complications with snapshot handling. For optimal results, allow {ilm-init} to manage
-snapshots automatically. If manual mounting is necessary, be aware of its potential
-impact on {ilm-init} processes. For more information, learn about <>.
+or complications with snapshot handling.
+
+For optimal results, allow {ilm-init} to manage
+snapshots automatically.
+
+<>.
 ====

[[searchable-snapshots-shared-cache]]

From 585bd64695b01b0aac37c0bb00bf53898a4ce358 Mon Sep 17 00:00:00 2001
From: Ignacio Vera
Date: Thu, 22 Aug 2024 12:56:31 +0200
Subject: [PATCH 143/389] Add H3 Benchmarks (#111359)

Microbenchmarks for H3
---
 benchmarks/build.gradle                       |  1 +
 .../benchmark/h3/H3Benchmark.java             | 46 +++++++++++++++++++
 .../elasticsearch/benchmark/h3/H3State.java   | 35 ++++++++++++++
 3 files changed, 82 insertions(+)
 create mode 100644 benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java
 create mode 100644 benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java

diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 49e81a67e85f9..3f7ee8b60b53c 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -37,6 +37,7 @@ dependencies {
     // us to invoke the JMH uberjar as usual.
     exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
   }
+  api(project(':libs:elasticsearch-h3'))
   api(project(':modules:aggregations'))
   api(project(':x-pack:plugin:esql-core'))
   api(project(':x-pack:plugin:esql'))
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java
new file mode 100644
index 0000000000000..2441acab7d405
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+package org.elasticsearch.benchmark.h3;
+
+import org.elasticsearch.h3.H3;
+import org.openjdk.jmh.Main;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+@OutputTimeUnit(TimeUnit.SECONDS)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 25, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(1)
+public class H3Benchmark {
+
+    @Benchmark
+    public void pointToH3(H3State state, Blackhole bh) {
+        for (int i = 0; i < state.points.length; i++) {
+            for (int res = 0; res <= 15; res++) {
+                bh.consume(H3.geoToH3(state.points[i][0], state.points[i][1], res));
+            }
+        }
+    }
+
+    @Benchmark
+    public void h3Boundary(H3State state, Blackhole bh) {
+        for (int i = 0; i < state.h3.length; i++) {
+            bh.consume(H3.h3ToGeoBoundary(state.h3[i]));
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        Main.main(args);
+    }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java
new file mode 100644
index 0000000000000..5707e692a0750
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+package org.elasticsearch.benchmark.h3;
+
+import org.elasticsearch.h3.H3;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+
+import java.io.IOException;
+import java.util.Random;
+
+@State(Scope.Benchmark)
+public class H3State {
+
+    double[][] points = new double[1000][2];
+    long[] h3 = new long[1000];
+
+    @Setup(Level.Trial)
+    public void setupTrial() throws IOException {
+        Random random = new Random(1234);
+        for (int i = 0; i < points.length; i++) {
+            points[i][0] = random.nextDouble() * 180 - 90;  // lat
+            points[i][1] = random.nextDouble() * 360 - 180; // lon
+            int res = random.nextInt(16);                   // resolution
+            h3[i] = H3.geoToH3(points[i][0], points[i][1], res);
+        }
+    }
+}
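[Editor's illustration -- not part of the patch.] For readers who have not used the `libs:elasticsearch-h3` API these benchmarks exercise, a minimal usage sketch built only from the two calls shown above (the coordinates and resolution are arbitrary; `H3Demo` is an invented class):

```java
import org.elasticsearch.h3.H3;

public class H3Demo {
    public static void main(String[] args) {
        // Latitude, longitude, and a resolution in [0, 15], as in H3State above.
        long cell = H3.geoToH3(48.8566, 2.3522, 8);
        System.out.println("cell = " + Long.toHexString(cell));
        // The boundary's concrete type isn't shown in the diff; printing it suffices here.
        System.out.println("boundary = " + H3.h3ToGeoBoundary(cell));
    }
}
```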
From 1362d56865db488fb9e084ff3b3fe88c6f597b86 Mon Sep 17 00:00:00 2001
From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com>
Date: Thu, 22 Aug 2024 15:13:52 +0300
Subject: [PATCH 144/389] Introduce mode `subobjects=auto` for objects (#110524)

* Introduce mode `subobjects=auto` for objects

* Update docs/changelog/110524.yaml

* compilation error

* tests and fixes

* refactor

* spotless

* more tests

* fix nested objects

* fix test

* update fetch test

* add QA coverage

* update tests

* update tests

* update tests

* fix nested
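[Editor's illustration -- not part of the patch.] Condensed from the YAML tests in this patch: with `subobjects: auto`, a dotted field such as `metrics.time` can coexist with `metrics.time.max`, which `subobjects: true` would reject because `time` cannot be both a leaf field and an object. A sketch in the same REST-test style (the index name and fields are invented):

```yaml
- do:
    indices.create:
      index: demo-auto
      body:
        mappings:
          subobjects: auto

- do:
    index:
      index: demo-auto
      refresh: true
      body: { "metrics.time": 10, "metrics.time.max": 100 }
```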
---
 docs/changelog/110524.yaml                    |   5 +
 ...ogsIndexModeRandomDataChallengeRestIT.java |  19 +-
 .../test/index/92_metrics_auto_subobjects.yml | 250 ++++++++++++++
 .../indices.create/20_synthetic_source.yml    |  89 +++++
 .../15_composition.yml                        | 109 ++++++
 .../test/search/330_fetch_fields.yml          |  52 +++
 .../index/mapper/DocumentParser.java          |  11 +-
 .../index/mapper/DynamicFieldsBuilder.java    |   2 +-
 .../index/mapper/MapperFeatures.java          |   3 +-
 .../index/mapper/NestedObjectMapper.java      |   7 +-
 .../index/mapper/ObjectMapper.java            | 131 ++++++--
 .../index/mapper/PassThroughObjectMapper.java |   5 +-
 .../index/mapper/RootObjectMapper.java        |   7 +-
 .../mapper/DynamicFieldsBuilderTests.java     |   4 +-
 .../index/mapper/DynamicTemplatesTests.java   | 310 +++++++++++++++++-
 .../FieldAliasMapperValidationTests.java      |   3 +-
 .../index/mapper/MappingLookupTests.java      |   3 +-
 .../index/mapper/ObjectMapperMergeTests.java  |  74 ++---
 .../index/mapper/ObjectMapperTests.java       | 180 +++++++++-
 .../query/SearchExecutionContextTests.java    |   4 +-
 .../index/mapper/MapperServiceTestCase.java   |   7 +-
 .../mapper/SemanticTextFieldMapper.java       |   4 +-
 22 files changed, 1154 insertions(+), 125 deletions(-)
 create mode 100644 docs/changelog/110524.yaml
 create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml

diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml
new file mode 100644
index 0000000000000..6274c99b09998
--- /dev/null
+++ b/docs/changelog/110524.yaml
@@ -0,0 +1,5 @@
+pr: 110524
+summary: Introduce mode `subobjects=auto` for objects
+area: Mapping
+type: enhancement
+issues: []
diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java
index 4e123c1630457..f53fdcb6e8600 100644
--- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java
@@ -10,6 +10,7 @@

 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.FormatNames;
+import org.elasticsearch.index.mapper.ObjectMapper;
 import org.elasticsearch.logsdb.datageneration.DataGenerator;
 import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification;
 import org.elasticsearch.logsdb.datageneration.FieldType;
@@ -32,17 +33,17 @@
  */
 public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT {
     private final boolean fullyDynamicMapping;
-    private final boolean subobjectsDisabled;
+    private final ObjectMapper.Subobjects subobjects;
     private final DataGenerator dataGenerator;

     public StandardVersusLogsIndexModeRandomDataChallengeRestIT() {
         super();
         this.fullyDynamicMapping = randomBoolean();
-        this.subobjectsDisabled = randomBoolean();
+        this.subobjects = randomFrom(ObjectMapper.Subobjects.values());

         var specificationBuilder = DataGeneratorSpecification.builder();
-        if (subobjectsDisabled) {
+        if (subobjects != ObjectMapper.Subobjects.ENABLED) {
             specificationBuilder = specificationBuilder.withNestedFieldsLimit(0);
         }
         this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() {
@@ -60,7 +61,7 @@ public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeG
             }

             public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequest.ObjectMappingParametersGenerator request) {
-                if (subobjectsDisabled == false) {
+                if (subobjects == ObjectMapper.Subobjects.ENABLED) {
                     // Use default behavior
                     return null;
                 }
@@ -71,13 +72,13 @@ public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequ
                 // "dynamic: false/strict/runtime" is not compatible with subobjects: false
                 return new DataSourceResponse.ObjectMappingParametersGenerator(() -> {
                     var parameters = new HashMap<String, Object>();
+                    parameters.put("subobjects", subobjects.toString());
                     if (ESTestCase.randomBoolean()) {
                         parameters.put("dynamic", "true");
                     }
                     if (ESTestCase.randomBoolean()) {
                         parameters.put("enabled", "true");
                     }
-
                     return parameters;
                 });
             }
@@ -106,15 +107,15 @@ public void baselineMappings(XContentBuilder builder) throws IOException {

     @Override
     public void contenderMappings(XContentBuilder builder) throws IOException {
         if (fullyDynamicMapping == false) {
-            if (subobjectsDisabled) {
-                dataGenerator.writeMapping(builder, b -> builder.field("subobjects", false));
+            if (subobjects != ObjectMapper.Subobjects.ENABLED) {
+                dataGenerator.writeMapping(builder, b -> builder.field("subobjects", subobjects.toString()));
             } else {
                 dataGenerator.writeMapping(builder);
             }
         } else {
             builder.startObject();
-            if (subobjectsDisabled) {
-                builder.field("subobjects", false);
+            if (subobjects != ObjectMapper.Subobjects.ENABLED) {
+                builder.field("subobjects", subobjects.toString());
             }
             builder.endObject();
         }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
new file mode 100644
index 0000000000000..984c1c22b2177
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
@@
-0,0 +1,250 @@ +--- +"Metrics object indexing": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + indices.put_template: + name: test + body: + index_patterns: test-* + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: {fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: {fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics.host.name: localhost + metrics.host.id: 1 + metrics.time: 10 + metrics.time.max: 100 + metrics.time.min: 1 + +--- +"Root with metrics": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + indices.put_template: + name: test + body: + index_patterns: test-* + mappings: + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Metrics object indexing with synthetic source": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + indices.put_template: + name: test + body: + index_patterns: test-* + mappings: + _source: + mode: synthetic + 
dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: {fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: {fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Root without subobjects with synthetic source": + - requires: + test_runner_features: allowed_warnings_regex + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + indices.put_template: + name: test + body: + index_patterns: test-* + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index e51074ee55270..1393d5454a9da 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1250,3 +1250,92 @@ empty nested object sorted as a first document: - match: { hits.hits.1._source.name: B } - match: { hits.hits.1._source.nested.a: "b" } + + 
+--- +subobjects auto: + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: requires tracking ignored source and supporting subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + id: + type: integer + regular: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + stored: + store_array_source: true + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + nested: + type: nested + auto_obj: + type: object + subobjects: auto + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "foo": 10, "foo.bar": 100, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "foo": 20, "foo.bar": 200, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 3, "foo": 30, "foo.bar": 300, "nested": [ { "a": 10, "b": 20 }, { "a": 100, "b": 200 } ] }' + - '{ "create": { } }' + - '{ "id": 4, "auto_obj": { "foo": 40, "foo.bar": 400 } }' + + - match: { errors: false } + + - do: + search: + index: test + sort: id + + - match: { hits.hits.0._source.id: 1 } + - match: { hits.hits.0._source.foo: 10 } + - match: { hits.hits.0._source.foo\.bar: 100 } + - match: { hits.hits.0._source.regular.span.id: "1" } + - match: { hits.hits.0._source.regular.trace.id: [ "a", "b" ] } + - match: { hits.hits.1._source.id: 2 } + - match: { hits.hits.1._source.foo: 20 } + - match: { hits.hits.1._source.foo\.bar: 200 } + - match: { hits.hits.1._source.stored.0.trace.id: a } + - match: { hits.hits.1._source.stored.0.span.id: "1" } + - match: { hits.hits.1._source.stored.1.trace.id: b } + - match: { hits.hits.1._source.stored.1.span.id: "1" } + - match: { hits.hits.2._source.id: 3 } + - match: { hits.hits.2._source.foo: 30 } + - match: { hits.hits.2._source.foo\.bar: 300 } + - match: { hits.hits.2._source.nested.0.a: 10 } + - match: { hits.hits.2._source.nested.0.b: 20 } + - match: { hits.hits.2._source.nested.1.a: 100 } + - match: { hits.hits.2._source.nested.1.b: 200 } + - match: { hits.hits.3._source.id: 4 } + - match: { hits.hits.3._source.auto_obj.foo: 40 } + - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 45bcf64f98945..3d82539944a97 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -449,6 +449,115 @@ index: test-generic - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + +--- +"Composable index templates that include subobjects: auto at root": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + subobjects: auto + properties: + message: + enabled: false + + - do: + 
cluster.put_component_template: + name: test-field + body: + template: + mappings: + properties: + parent.subfield: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - test-field + - is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent\.subfield.type: "keyword" } + - match: { test-generic.mappings.properties.message.type: "object" } + +--- +"Composable index templates that include subobjects: auto on arbitrary field": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + properties: + parent: + type: object + subobjects: auto + properties: + message: + enabled: false + + - do: + cluster.put_component_template: + name: test-subfield + body: + template: + mappings: + properties: + parent: + properties: + child.grandchild: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - test-subfield + - is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + - match: { test-generic.mappings.properties.parent.properties.message.type: "object" } + + --- "Composition of component templates with different legal field mappings": - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 703f2a0352fbd..c120bed2d369d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -1125,3 +1125,55 @@ fetch geo_point: - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } - match: { hits.hits.0.fields.root\.subfield.0: 'child' } - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + +--- +"Test with subobjects: auto": + - requires: + cluster_features: "mapper.subobjects_auto" + reason: requires support for subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + subobjects: auto + properties: + message: + type: object + subobjects: auto + enabled: false + + - do: + index: + index: test + refresh: true + body: > + { + "root": "parent", + "root.subfield": "child", + "message": { + "foo": 10, + "foo.bar": 20 + } + } + - match: {result: "created"} + + - do: + search: + index: test + body: + query: + term: + 
root.subfield: + value: 'child' + fields: + - field: 'root*' + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.root.0: 'parent' } + - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } + - match: { hits.hits.0.fields.root\.subfield.0: 'child' } + - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + - is_false: hits.hits.0.fields.message + - match: { hits.hits.0._source.message.foo: 10 } + - match: { hits.hits.0._source.message.foo\.bar: 20 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 2bf3668a3dabe..35f0130c58706 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -41,6 +41,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Consumer; import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; @@ -476,7 +477,7 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr private static boolean shouldFlattenObject(DocumentParserContext context, FieldMapper fieldMapper) { return context.parser().currentToken() == XContentParser.Token.START_OBJECT - && context.parent().subobjects() == false + && context.parent().subobjects() != ObjectMapper.Subobjects.ENABLED && fieldMapper.supportsParsingObject() == false; } @@ -517,7 +518,7 @@ private static void parseObject(final DocumentParserContext context, String curr private static void doParseObject(DocumentParserContext context, String currentFieldName, Mapper objectMapper) throws IOException { context.path().add(currentFieldName); boolean withinLeafObject = context.path().isWithinLeafObject(); - if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() == false) { + if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() != ObjectMapper.Subobjects.ENABLED) { context.path().setWithinLeafObject(true); } parseObjectOrField(context, objectMapper); @@ -563,7 +564,7 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur } else { dynamicObjectMapper = DynamicFieldsBuilder.createDynamicObjectMapper(context, currentFieldName); } - if (context.parent().subobjects() == false) { + if (context.parent().subobjects() == ObjectMapper.Subobjects.DISABLED) { if (dynamicObjectMapper instanceof NestedObjectMapper) { throw new DocumentParsingException( context.parser().getTokenLocation(), @@ -1012,7 +1013,7 @@ private static class NoOpObjectMapper extends ObjectMapper { name, fullPath, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, Dynamic.RUNTIME, Collections.emptyMap() @@ -1051,7 +1052,7 @@ private static class RootDocumentParserContext extends DocumentParserContext { mappingLookup.getMapping().getRoot(), ObjectMapper.Dynamic.getRootDynamic(mappingLookup) ); - if (mappingLookup.getMapping().getRoot().subobjects()) { + if (mappingLookup.getMapping().getRoot().subobjects() == ObjectMapper.Subobjects.ENABLED) { this.parser = DotExpandingXContentParser.expandDots(parser, this.path); } else { this.parser = parser; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index d479cb97e3fd3..6eb1920df02c8 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -161,7 +161,7 @@ static Mapper createDynamicObjectMapper(DocumentParserContext context, String na Mapper mapper = createObjectMapperFromTemplate(context, name); return mapper != null ? mapper - : new ObjectMapper.Builder(name, ObjectMapper.Defaults.SUBOBJECTS).enabled(ObjectMapper.Defaults.ENABLED) + : new ObjectMapper.Builder(name, context.parent().subobjects).enabled(ObjectMapper.Defaults.ENABLED) .build(context.createDynamicMapperBuilderContext()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 15d77ba6d2229..7810fcdc64773 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -31,7 +31,8 @@ public Set getFeatures() { KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE, IndexModeFieldMapper.QUERYING_INDEX_MODE, NodeMappingStats.SEGMENT_LEVEL_FIELDS_STATS, - BooleanFieldMapper.BOOLEAN_DIMENSION + BooleanFieldMapper.BOOLEAN_DIMENSION, + ObjectMapper.SUBOBJECTS_AUTO ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index f3c438adcea09..d866b3c78173b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Stream; @@ -49,7 +50,7 @@ public static class Builder extends ObjectMapper.Builder { private final Function bitSetProducer; public Builder(String name, IndexVersion indexCreatedVersion, Function bitSetProducer) { - super(name, Explicit.IMPLICIT_TRUE); + super(name, Optional.empty()); this.indexCreatedVersion = indexCreatedVersion; this.bitSetProducer = bitSetProducer; } @@ -121,7 +122,7 @@ public static class TypeParser extends ObjectMapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { - if (parseSubobjects(node).explicit()) { + if (parseSubobjects(node).isPresent()) { throw new MapperParsingException("Nested type [" + name + "] does not support [subobjects] parameter"); } NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder( @@ -209,7 +210,7 @@ public MapperBuilderContext createChildContext(String name, Dynamic dynamic) { Query nestedTypeFilter, Function bitsetProducer ) { - super(name, fullPath, enabled, Explicit.IMPLICIT_TRUE, storeArraySource, dynamic, mappers); + super(name, fullPath, enabled, Optional.empty(), storeArraySource, dynamic, mappers); this.parentTypeFilter = parentTypeFilter; this.nestedTypePath = nestedTypePath; this.nestedTypeFilter = nestedTypeFilter; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index e504702d84c1e..29ec0357d7c1e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -16,6 +16,7 @@ import 
org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -33,6 +34,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.TreeMap; import java.util.stream.Stream; @@ -41,10 +43,50 @@ public class ObjectMapper extends Mapper { public static final String CONTENT_TYPE = "object"; static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source"; + static final NodeFeature SUBOBJECTS_AUTO = new NodeFeature("mapper.subobjects_auto"); + + /** + * Enhances the previously boolean option for subobjects support with an intermediate mode `auto` that uses + * any objects that are present in the mappings and flattens any fields defined outside the predefined objects. + */ + public enum Subobjects { + ENABLED(Boolean.TRUE), + DISABLED(Boolean.FALSE), + AUTO("auto"); + + private final Object printedValue; + + Subobjects(Object printedValue) { + this.printedValue = printedValue; + } + + static Subobjects from(Object node) { + if (node instanceof Boolean value) { + return value ? Subobjects.ENABLED : Subobjects.DISABLED; + } + if (node instanceof String value) { + if (value.equalsIgnoreCase("true")) { + return ENABLED; + } + if (value.equalsIgnoreCase("false")) { + return DISABLED; + } + if (value.equalsIgnoreCase("auto")) { + return AUTO; + } + } + throw new ElasticsearchParseException("unknown subobjects value: " + node); + } + + @Override + public String toString() { + return printedValue.toString(); + } + } public static class Defaults { public static final boolean ENABLED = true; - public static final Explicit SUBOBJECTS = Explicit.IMPLICIT_TRUE; + public static final Optional SUBOBJECTS = Optional.empty(); public static final Explicit STORE_ARRAY_SOURCE = Explicit.IMPLICIT_FALSE; public static final Dynamic DYNAMIC = Dynamic.TRUE; } @@ -81,13 +123,13 @@ static Dynamic getRootDynamic(MappingLookup mappingLookup) { } public static class Builder extends Mapper.Builder { - protected final Explicit subobjects; + protected Optional subobjects; protected Explicit enabled = Explicit.IMPLICIT_TRUE; protected Explicit storeArraySource = Defaults.STORE_ARRAY_SOURCE; protected Dynamic dynamic; protected final List mappersBuilders = new ArrayList<>(); - public Builder(String name, Explicit subobjects) { + public Builder(String name, Optional subobjects) { super(name); this.subobjects = subobjects; } @@ -132,20 +174,27 @@ public Mapper build(MapperBuilderContext context) { public final void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) { // If the mapper to add has no dots, or the current object mapper has subobjects set to false, // we just add it as it is for sure a leaf mapper - if (name.contains(".") == false || subobjects.value() == false) { + if (name.contains(".") == false || (subobjects.isPresent() && (subobjects.get() == Subobjects.DISABLED))) { add(name, mapper); - } - // otherwise we strip off the first object path of the mapper name, load or create - // the relevant object mapper, and then recurse down into it, passing the remainder - // of the mapper name. 
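+                // Under `subobjects: auto`, if no such object exists, the remaining dotted
+                // suffix is instead added as a single flattened leaf - see the AUTO branch below.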
So for a mapper 'foo.bar.baz', we locate 'foo' and then - // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'. - else { + } else { + // We strip off the first object path of the mapper name, load or create + // the relevant object mapper, and then recurse down into it, passing the remainder + // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then + // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'. int firstDotIndex = name.indexOf('.'); String immediateChild = name.substring(0, firstDotIndex); String immediateChildFullName = prefix == null ? immediateChild : prefix + "." + immediateChild; Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); - parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); - add(parentBuilder); + if (parentBuilder != null) { + parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); + add(parentBuilder); + } else if (subobjects.isPresent() && subobjects.get() == Subobjects.AUTO) { + // No matching parent object was found, the mapper is added as a leaf - similar to subobjects false. + add(name, mapper); + } else { + // Expected to find a matching parent object but got null. + throw new IllegalStateException("Missing intermediate object " + immediateChildFullName); + } } } @@ -160,7 +209,8 @@ private static Builder findObjectBuilder(String fullName, DocumentParserContext if (objectMapper != null) { return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); } - throw new IllegalStateException("Missing intermediate object " + fullName); + // no object mapper found + return null; } protected final Map buildMappers(MapperBuilderContext mapperBuilderContext) { @@ -176,7 +226,7 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil // mix of object notation and dot notation. 
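+            // (e.g. the same field defined both as "foo.bar" and as "foo" with a "bar" sub-property).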
mapper = existing.merge(mapper, MapperMergeContext.from(mapperBuilderContext, Long.MAX_VALUE)); } - if (subobjects.value() == false && mapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() && subobjects.get() == Subobjects.DISABLED && mapper instanceof ObjectMapper objectMapper) { // We're parsing a mapping that has set `subobjects: false` but has defined sub-objects objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.leafName(), m)); } else { @@ -215,7 +265,7 @@ public boolean supportsVersion(IndexVersion indexCreatedVersion) { public Mapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { parserContext.incrementMappingObjectDepth(); // throws MapperParsingException if depth limit is exceeded - Explicit subobjects = parseSubobjects(node); + Optional subobjects = parseSubobjects(node); Builder builder = new Builder(name, subobjects); parseObjectFields(node, parserContext, builder); parserContext.decrementMappingObjectDepth(); @@ -277,10 +327,10 @@ protected static boolean parseObjectOrDocumentTypeProperties( return false; } - protected static Explicit parseSubobjects(Map node) { + protected static Optional parseSubobjects(Map node) { Object subobjectsNode = node.remove("subobjects"); if (subobjectsNode != null) { - return Explicit.explicitBoolean(XContentMapValues.nodeBooleanValue(subobjectsNode, "subobjects.subobjects")); + return Optional.of(Subobjects.from(subobjectsNode)); } return Defaults.SUBOBJECTS; } @@ -317,7 +367,9 @@ protected static void parseProperties(Builder objBuilder, Map pr } } - if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) { + if (objBuilder.subobjects.isPresent() + && objBuilder.subobjects.get() == Subobjects.DISABLED + && type.equals(NestedObjectMapper.CONTENT_TYPE)) { throw new MapperParsingException( "Tried to add nested object [" + fieldName @@ -331,7 +383,7 @@ protected static void parseProperties(Builder objBuilder, Map pr throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]"); } Mapper.Builder fieldBuilder; - if (objBuilder.subobjects.value() == false) { + if (objBuilder.subobjects.isPresent() && objBuilder.subobjects.get() != Subobjects.ENABLED) { fieldBuilder = typeParser.parse(fieldName, propNode, parserContext); } else { String[] fieldNameParts = fieldName.split("\\."); @@ -379,7 +431,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate private final String fullPath; protected final Explicit enabled; - protected final Explicit subobjects; + protected final Optional subobjects; protected final Explicit storeArraySource; protected final Dynamic dynamic; @@ -389,7 +441,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate String name, String fullPath, Explicit enabled, - Explicit subobjects, + Optional subobjects, Explicit storeArraySource, Dynamic dynamic, Map mappers @@ -407,7 +459,9 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate } else { this.mappers = Map.copyOf(mappers); } - assert subobjects.value() || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper) + assert subobjects.isEmpty() + || subobjects.get() != Subobjects.DISABLED + || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper) : "When subobjects is false, mappers must not contain an ObjectMapper"; } @@ -460,8 +514,8 @@ public final Dynamic dynamic() 
{ return dynamic; } - public final boolean subobjects() { - return subobjects.value(); + public final Subobjects subobjects() { + return subobjects.orElse(Subobjects.ENABLED); } public final boolean storeArraySource() { @@ -502,7 +556,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex protected record MergeResult( Explicit enabled, - Explicit subObjects, + Optional subObjects, Explicit trackArraySource, Dynamic dynamic, Map mappers @@ -523,11 +577,11 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma } else { enabled = existing.enabled; } - final Explicit subObjects; - if (mergeWithObject.subobjects.explicit()) { + final Optional subObjects; + if (mergeWithObject.subobjects.isPresent()) { if (reason == MergeReason.INDEX_TEMPLATE) { subObjects = mergeWithObject.subobjects; - } else if (existing.subobjects != mergeWithObject.subobjects) { + } else if (existing.subobjects() != mergeWithObject.subobjects()) { throw new MapperException( "the [subobjects] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" ); @@ -552,7 +606,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma trackArraySource = existing.storeArraySource; } MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.leafName()); - Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects.value()); + Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects); return new MergeResult( enabled, subObjects, @@ -566,11 +620,13 @@ private static Map buildMergedMappers( ObjectMapper existing, ObjectMapper mergeWithObject, MapperMergeContext objectMergeContext, - boolean subobjects + Optional subobjects ) { Map mergedMappers = new HashMap<>(); for (Mapper childOfExistingMapper : existing.mappers.values()) { - if (subobjects == false && childOfExistingMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && childOfExistingMapper instanceof ObjectMapper objectMapper) { // An existing mapping with sub-objects is merged with a mapping that has set `subobjects: false` objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .forEach(m -> mergedMappers.put(m.leafName(), m)); @@ -581,7 +637,9 @@ private static Map buildMergedMappers( for (Mapper mergeWithMapper : mergeWithObject) { Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.leafName()); if (mergeIntoMapper == null) { - if (subobjects == false && mergeWithMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && mergeWithMapper instanceof ObjectMapper objectMapper) { // An existing mapping that has set `subobjects: false` is merged with a mapping with sub-objects objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .stream() @@ -593,7 +651,8 @@ private static Map buildMergedMappers( putMergedMapper(mergedMappers, truncateObjectMapper(objectMergeContext, om)); } } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - assert subobjects : "existing object mappers are supposed to be flattened if subobjects is false"; + assert subobjects.isEmpty() || subobjects.get() != Subobjects.DISABLED + : "existing object mappers are supposed to be flattened if subobjects is false"; putMergedMapper(mergedMappers, objectMapper.merge(mergeWithMapper, 
objectMergeContext)); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; @@ -675,7 +734,7 @@ private void ensureFlattenable(MapperBuilderContext context, ContentPath path) { if (isEnabled() == false) { throwAutoFlatteningException(path, "the value of [enabled] is [false]"); } - if (subobjects.explicit() && subobjects()) { + if (subobjects.isPresent() && subobjects.get() == Subobjects.ENABLED) { throwAutoFlatteningException(path, "the value of [subobjects] is [true]"); } } @@ -710,8 +769,8 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw if (isEnabled() != Defaults.ENABLED) { builder.field("enabled", enabled.value()); } - if (subobjects != Defaults.SUBOBJECTS) { - builder.field("subobjects", subobjects.value()); + if (subobjects.isPresent()) { + builder.field("subobjects", subobjects.get().printedValue); } if (storeArraySource != Defaults.STORE_ARRAY_SOURCE) { builder.field(STORE_ARRAY_SOURCE_PARAM, storeArraySource.value()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index 0b7f4de157bdc..7370fe3c61772 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -18,6 +18,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; @@ -52,7 +53,7 @@ public static class Builder extends ObjectMapper.Builder { public Builder(String name) { // Subobjects are not currently supported. - super(name, Explicit.IMPLICIT_FALSE); + super(name, Optional.of(Subobjects.DISABLED)); } @Override @@ -103,7 +104,7 @@ public PassThroughObjectMapper build(MapperBuilderContext context) { int priority ) { // Subobjects are not currently supported. 
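+        // (hence subobjects is pinned to Subobjects.DISABLED in the call below).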
- super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, Explicit.IMPLICIT_FALSE, dynamic, mappers); + super(name, fullPath, enabled, Optional.of(Subobjects.DISABLED), Explicit.IMPLICIT_FALSE, dynamic, mappers); this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields; this.priority = priority; if (priority < 0) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 11aabd8726f4f..6c178330e5c9e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -34,6 +34,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.BiConsumer; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; @@ -75,7 +76,7 @@ public static class Builder extends ObjectMapper.Builder { protected Explicit dateDetection = Defaults.DATE_DETECTION; protected Explicit numericDetection = Defaults.NUMERIC_DETECTION; - public Builder(String name, Explicit subobjects) { + public Builder(String name, Optional subobjects) { super(name, subobjects); } @@ -132,7 +133,7 @@ public RootObjectMapper build(MapperBuilderContext context) { RootObjectMapper( String name, Explicit enabled, - Explicit subobjects, + Optional subobjects, Explicit trackArraySource, Dynamic dynamic, Map mappers, @@ -442,7 +443,7 @@ protected boolean isRoot() { public static RootObjectMapper.Builder parse(String name, Map node, MappingParserContext parserContext) throws MapperParsingException { - Explicit subobjects = parseSubobjects(node); + Optional subobjects = parseSubobjects(node); RootObjectMapper.Builder builder = new Builder(name, subobjects); Iterator> iterator = node.entrySet().iterator(); while (iterator.hasNext()) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index a138f0910e6ec..878bdc91bba06 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -19,6 +18,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Optional; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -69,7 +69,7 @@ public void testCreateDynamicStringFieldAsKeywordForDimension() throws IOExcepti SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new PassThroughObjectMapper.Builder("labels").setPriority(0).setContainsDimensions().dynamic(ObjectMapper.Dynamic.TRUE) ).build(MapperBuilderContext.root(false, false)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 61926d72982d8..a5a5d9726f233 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -1132,6 +1132,14 @@ public void testDynamicRuntimeWithDynamicTemplate() throws IOException { } private MapperService createDynamicTemplateNoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("false"); + } + + private MapperService createDynamicTemplateAutoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("auto"); + } + + private MapperService createDynamicTemplateWithSubobjects(String subobjects) throws IOException { return createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); { @@ -1141,7 +1149,7 @@ private MapperService createDynamicTemplateNoSubobjects() throws IOException { { b.field("match_mapping_type", "object"); b.field("match", "metric"); - b.startObject("mapping").field("type", "object").field("subobjects", false).endObject(); + b.startObject("mapping").field("type", "object").field("subobjects", subobjects).endObject(); } b.endObject(); } @@ -1388,7 +1396,7 @@ public void testDynamicSubobjectsFalseDynamicFalse() throws Exception { assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); assertEquals(1, metrics.mappers.size()); ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); - assertFalse(service.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, service.subobjects()); assertEquals(1, service.mappers.size()); assertNotNull(service.getMapper("time")); } @@ -1434,6 +1442,255 @@ public void testSubobjectsFalseWithInnerNestedFromDynamicTemplate() { ); } + public void testSubobjectsAutoFlatPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.field("foo.metric.count", 10); + b.field("foo.bar.baz", 10); + b.field("foo.metric.count.min", 4); + b.field("foo.metric.count.max", 15); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoStructuredPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startObject("metric"); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoArrayOfObjects() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startArray("metric"); + { + b.startObject(); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject(); + { + b.field("count", 5); + b.field("count.min", 3); + b.field("count.max", 50); + } + b.endObject(); + } + b.endArray(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + 
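+        // Folding the dynamic mappings update back into the mapper service lets the
+        // assertion below verify that no intermediate sub-objects were created for the
+        // dotted `count.*` fields, even though they arrived inside an array of objects.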
merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.startObject("properties"); + b.startObject("metrics").field("type", "object").field("subobjects", "auto").endObject(); + b.endObject(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics.object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("metrics.object.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("object"), + instanceOf(NestedObjectMapper.class) + ); + } + + public void testRootSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.field("subobjects", "auto"); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("object.foo")); + assertThat(doc.dynamicMappingsUpdate().getRoot().getMapper("object"), instanceOf(NestedObjectMapper.class)); + } + + public void testDynamicSubobjectsAutoDynamicFalse() throws Exception { + // verify that we read the dynamic value properly from the parent mapper. DocumentParser#dynamicOrDefault splits the field + // name where dots are found, but it does that only for the parent prefix e.g. 
metrics.service and not for the leaf suffix time.max + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("metrics"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object"); + b.field("dynamic", "false"); + b.startObject("properties"); + { + b.startObject("service"); + { + b.field("type", "object"); + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "service": { + "time" : 10, + "time.max" : 500 + } + } + } + """)); + + assertNotNull(doc.rootDoc().getField("metrics.service.time")); + assertNull(doc.rootDoc().getField("metrics.service.time.max")); + assertNotNull(doc.dynamicMappingsUpdate()); + ObjectMapper metrics = (ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics"); + assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); + assertEquals(1, metrics.mappers.size()); + ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); + assertEquals(ObjectMapper.Subobjects.AUTO, service.subobjects()); + assertEquals(1, service.mappers.size()); + assertNotNull(service.getMapper("time")); + } + + public void testSubobjectsAutoWithInnerNestedFromDynamicTemplate() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object").field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "time" : { + "foo" : "bar" + }, + "time.max" : 500 + } + } + """)); + + assertNotNull(doc.rootDoc().get("metrics.time.max")); + assertNotNull(doc.docs().get(0).get("metrics.time.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("time"), + instanceOf(NestedObjectMapper.class) + ); + } + public void testDynamicSubobject() throws IOException { MapperService mapperService = createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); @@ -1803,7 +2060,7 @@ public void testSubobjectsFalseDocWithEmptyObject() throws IOException { Mapping mapping = doc.dynamicMappingsUpdate(); ObjectMapper artifacts = (ObjectMapper) mapping.getRoot().getMapper("artifacts"); ObjectMapper leaf = (ObjectMapper) artifacts.getMapper("leaf"); - assertFalse(leaf.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, leaf.subobjects()); } public void testSubobjectsFalseFlattened() throws IOException { @@ -1853,6 +2110,53 @@ public void testSubobjectsFalseFlattened() throws IOException { assertEquals("flattened", fooStructuredMapper.typeName()); } + public void testSubobjectsAutoFlattened() throws IOException { + String mapping = """ + { + "_doc": { + "properties": { + "attributes": { + "type": "object", + "subobjects": "auto" + } + }, + "dynamic_templates": [ + { + "test": { + "path_match": "attributes.resource.*", + "match_mapping_type": "object", + 
"mapping": { + "type": "flattened" + } + } + } + ] + } + } + """; + String docJson = """ + { + "attributes.resource": { + "complex.attribute": { + "a": "b" + }, + "foo.bar": "baz" + } + } + """; + + MapperService mapperService = createMapperService(mapping); + ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); + merge(mapperService, dynamicMapping(parsedDoc.dynamicMappingsUpdate())); + + Mapper fooBarMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.foo.bar"); + assertNotNull(fooBarMapper); + assertEquals("text", fooBarMapper.typeName()); + Mapper fooStructuredMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.complex.attribute"); + assertNotNull(fooStructuredMapper); + assertEquals("flattened", fooStructuredMapper.typeName()); + } + public void testMatchWithArrayOfFieldNames() throws IOException { String mapping = """ { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index d913b86aed2d5..a8669a0befd0d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -16,6 +16,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -176,7 +177,7 @@ private static ObjectMapper createObjectMapper(String name) { name, name, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.FALSE, emptyMap() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 251b0ae62f3c5..6a790f7e91118 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -81,7 +82,7 @@ public void testSubfieldOverride() { "object", "object", Explicit.EXPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.TRUE, Collections.singletonMap("object.subfield", fieldMapper) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index b3bb8cbe697a5..ea6ddf0257d6f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -7,11 +7,11 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.Optional; import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; @@ -26,9 +26,9 @@ private RootObjectMapper createMapping( boolean includeBarField, boolean includeBazField ) { - 
RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Explicit.IMPLICIT_TRUE); - rootBuilder.add(new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE).enabled(disabledFieldEnabled)); - ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Explicit.IMPLICIT_TRUE).enabled(fooFieldEnabled); + RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Optional.empty()); + rootBuilder.add(new ObjectMapper.Builder("disabled", Optional.empty()).enabled(disabledFieldEnabled)); + ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Optional.empty()).enabled(fooFieldEnabled); if (includeBarField) { fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false)); } @@ -77,8 +77,8 @@ public void testMergeWhenDisablingField() { public void testMergeDisabledField() { // GIVEN a mapping with "foo" field disabled // the field is disabled, and we are not trying to re-enable it, hence merge should work - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("disabled", Optional.empty()) ).build(MapperBuilderContext.root(false, false)); RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -100,10 +100,8 @@ public void testMergeEnabled() { public void testMergeEnabledForRootMapper() { String type = MapperService.SINGLE_MAPPING_NAME; - ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build( - MapperBuilderContext.root(false, false) - ); - ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).enabled(false) + ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).enabled(false) .build(MapperBuilderContext.root(false, false)); MapperException e = expectThrows( @@ -144,12 +142,10 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { } public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -161,9 +157,9 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { } public void testMergedFieldNamesMultiFields() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeInto = new 
RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -177,10 +173,10 @@ public void testMergedFieldNamesMultiFields() { } public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); @@ -212,9 +208,9 @@ public void testMergeWithLimit() { } public void testMergeWithLimitTruncatedObjectField() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -243,11 +239,11 @@ public void testMergeWithLimitTruncatedObjectField() { } public void testMergeSameObjectDifferentFields() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()).ignoreAbove(42) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -270,10 +266,10 @@ public void testMergeSameObjectDifferentFields() { } public void testMergeWithLimitMultiField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + 
RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword1") ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword2") ).build(MapperBuilderContext.root(false, false)); @@ -287,10 +283,10 @@ public void testMergeWithLimitMultiField() { } public void testMergeWithLimitRuntimeField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).add(createTextKeywordMultiField("text", "keyword1")).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).addRuntimeField(new TestRuntimeField("new_runtime_field", "keyword")).build(MapperBuilderContext.root(false, false)); @@ -304,12 +300,12 @@ public void testMergeWithLimitRuntimeField() { } public void testMergeSubobjectsFalseWithObject() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE) + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).add( new KeywordFieldMapper.Builder("grandchild", IndexVersion.current()) ) ) @@ -326,7 +322,7 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); assertEquals("host.name", fieldMapper.leafName()); assertEquals("host.name", fieldMapper.fullPath()); - return new RootObjectMapper.Builder("_doc", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + return new RootObjectMapper.Builder("_doc", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) .build(MapperBuilderContext.root(false, false)); } @@ -346,7 +342,7 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { assertEquals("host.name", fieldMapper.leafName()); assertEquals("foo.metrics.host.name", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } @@ -369,7 +365,7 @@ private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { assertEquals("keyword", fieldMapper.leafName()); 
assertEquals("foo.metrics.host.name.keyword", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 3c81f833985dd..49d8ba9c2ca29 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -21,9 +20,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.core.IsInstanceOf; import java.io.IOException; import java.util.List; +import java.util.Optional; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -164,7 +165,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { ObjectMapper objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertFalse(objectMapper.isEnabled()); - assertTrue(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.ENABLED, objectMapper.subobjects()); assertFalse(objectMapper.storeArraySource()); // Setting 'enabled' to true is allowed, and updates the mapping. 
@@ -175,7 +176,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { .startObject("object") .field("type", "object") .field("enabled", true) - .field("subobjects", false) + .field("subobjects", "auto") .field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true) .endObject() .endObject() @@ -186,7 +187,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertTrue(objectMapper.isEnabled()); - assertFalse(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.AUTO, objectMapper.subobjects()); assertTrue(objectMapper.storeArraySource()); } @@ -500,6 +501,141 @@ public void testSubobjectsCannotBeUpdatedOnRoot() throws IOException { assertEquals("the [subobjects] parameter can't be updated for the object mapping [_doc]", exception.getMessage()); } + public void testSubobjectsAuto() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "long"); + b.endObject(); + b.startObject("time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("attributes"); + { + b.field("type", "object"); + b.field("enabled", "false"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.attributes")); + } + + public void testSubobjectsAutoWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.startObject("foo"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.fieldType("metrics.service.foo")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.foo")); + } + + public void testSubobjectsAutoWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + + public void testSubobjectsAutoRoot() throws Exception { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.service.time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.attributes"); + { + b.field("type", "object"); + b.field("enabled", 
"false"); + } + b.endObject(); + }, "auto")); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.attributes")); + } + + public void testSubobjectsAutoRootWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + }, "auto")); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.time.max")); + } + + public void testSubobjectsAutoRootWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service"); + b.field("type", "nested"); + b.endObject(); + }, "auto")); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + /** * Makes sure that an empty object mapper returns {@code null} from * {@link SourceLoader.SyntheticFieldLoader#docValuesLoader}. This @@ -554,8 +690,8 @@ public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOExceptio } public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { - ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child_size_2", Explicit.IMPLICIT_TRUE).add( + ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Optional.empty()).add( + new ObjectMapper.Builder("child_size_2", Optional.empty()).add( new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_size_4", IndexVersion.current()) ) @@ -602,10 +738,26 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsAuto() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.AUTO)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", IndexVersion.current())) + ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); + List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsFalse() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", 
IndexVersion.current())) ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); @@ -613,8 +765,8 @@ public void testFlatten() { public void testFlattenDynamicIncompatible() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).dynamic(Dynamic.FALSE) + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).dynamic(Dynamic.FALSE) ).build(rootContext); IllegalArgumentException exception = expectThrows( @@ -631,7 +783,7 @@ public void testFlattenDynamicIncompatible() { public void testFlattenEnabledFalse() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).enabled(false).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).enabled(false).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -646,7 +798,7 @@ public void testFlattenEnabledFalse() { public void testFlattenExplicitSubobjectsTrue() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.EXPLICIT_TRUE).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.ENABLED)).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9cd1df700a618..ffca4352f0ae6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -90,6 +89,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiFunction; import java.util.function.Function; @@ -384,7 +384,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); Mapping mapping = 
new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index c5aa03d5548f6..272901eb19351 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -426,8 +426,13 @@ protected static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + return mappingWithSubobjects(buildFields, "false"); + } + + protected static XContentBuilder mappingWithSubobjects(CheckedConsumer buildFields, String subobjects) + throws IOException { return topMapping(xContentBuilder -> { - xContentBuilder.field("subobjects", false); + xContentBuilder.field("subobjects", subobjects); xContentBuilder.startObject("properties"); buildFields.accept(xContentBuilder); xContentBuilder.endObject(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index a8c3de84572a7..71906a720e969 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -61,6 +60,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.function.Function; @@ -475,7 +475,7 @@ private static ObjectMapper createInferenceField( @Nullable SemanticTextField.ModelSettings modelSettings, Function bitSetProducer ) { - return new ObjectMapper.Builder(INFERENCE_FIELD, Explicit.EXPLICIT_TRUE).dynamic(ObjectMapper.Dynamic.FALSE) + return new ObjectMapper.Builder(INFERENCE_FIELD, Optional.of(ObjectMapper.Subobjects.ENABLED)).dynamic(ObjectMapper.Dynamic.FALSE) .add(createChunksField(indexVersionCreated, modelSettings, bitSetProducer)) .build(context); } From c1daf18bf5f5178d43dc17d3a3d1f5db9773098e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 22 Aug 2024 08:40:35 -0400 Subject: [PATCH 145/389] ESQL: Support INLINESTATS grouped on expressions (#111690) This adds support for grouping `INLINESTATS` on an expression: ``` | INLINESTATS MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1) ``` This functions *exactly* as though you had run: ``` | EVAL `SUBSTRING(last_name, 0, 1)` = SUBSTRING(last_name, 0, 1) | INLINESTATS MAX(avg_worked_seconds) BY `SUBSTRING(last_name, 0, 1)` ``` The calculated field is retained in the results. This works by running the `LogicalPlanOptimizer` before forking off plan phases. If we get sub-phases, then we rerun the `LogicalPlanOptimizer` on each phase so we can fuse *stuff*. Then I had to modify the optimizer rule that implements expressions in the `BY` position on `STATS` so that it also works on `INLINESTATS`. And that's it? That's it?! Really?
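For reference, here is a fuller sketch of the new syntax, drawn from the csv-spec tests added below (the `employees` index and its `last_name` and `avg_worked_seconds` fields are test fixtures, not assumptions about any real mapping); the grouping expression can also be given a name:

```
FROM employees
| KEEP emp_no, avg_worked_seconds, last_name
| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds)
    BY l = SUBSTRING(last_name, 0, 1)
| WHERE max_avg_worked_seconds == avg_worked_seconds
```

The named key (`l` here, or the raw expression text in the unnamed form) is kept as an output column next to the aggregate.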
--- docs/changelog/111690.yaml | 5 + .../xpack/esql/ccq/MultiClusterSpecIT.java | 1 + .../src/main/resources/inlinestats.csv-spec | 164 +++++++++++++++++- .../src/main/resources/stats.csv-spec | 31 ++++ .../src/main/resources/union_types.csv-spec | 27 ++- .../xpack/esql/action/EsqlCapabilities.java | 5 + .../xpack/esql/analysis/Analyzer.java | 4 +- .../esql/optimizer/LogicalPlanOptimizer.java | 1 + .../xpack/esql/optimizer/OptimizerRules.java | 4 +- .../optimizer/rules/RemoveStatsOverride.java | 49 +++--- .../ReplaceStatsAggExpressionWithEval.java | 2 +- .../ReplaceStatsNestedExpressionWithEval.java | 27 ++- .../xpack/esql/plan/logical/Aggregate.java | 4 +- .../xpack/esql/plan/logical/InlineStats.java | 12 +- .../xpack/esql/plan/logical/Phased.java | 4 +- .../xpack/esql/plan/logical/Stats.java | 18 +- .../xpack/esql/session/EsqlSession.java | 39 +++-- .../elasticsearch/xpack/esql/CsvTests.java | 4 +- .../optimizer/LogicalPlanOptimizerTests.java | 26 +++ .../xpack/esql/plan/logical/PhasedTests.java | 9 +- 20 files changed, 360 insertions(+), 76 deletions(-) create mode 100644 docs/changelog/111690.yaml diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml new file mode 100644 index 0000000000000..36e715744ad88 --- /dev/null +++ b/docs/changelog/111690.yaml @@ -0,0 +1,5 @@ +pr: 111690 +summary: "ESQL: Support INLINESTATS grouped on expressions" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index d6ab99f0b21ac..3e799730f7269 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -111,6 +111,7 @@ protected void shouldSkipTest(String testName) throws IOException { isEnabled(testName, instructions, Clusters.oldVersion()) ); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec index e52f1e45cead8..3f2e14f74174b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec @@ -67,11 +67,70 @@ emp_no:integer | avg_worked_seconds:long | gender:keyword | max_avg_worked_secon 10030 | 394597613 | M | 394597613 ; -// TODO allow inline calculation like BY l = SUBSTRING( maxOfLongByCalculatedKeyword -required_capability: inlinestats +required_capability: inlinestats_v2 // tag::longest-tenured-by-first[] +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +// end::longest-tenured-by-first[] +; + +// tag::longest-tenured-by-first-result[] +emp_no:integer | avg_worked_seconds:long | last_name:keyword | SUBSTRING(last_name, 0, 1):keyword | 
max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +// end::longest-tenured-by-first-result[] +; + +maxOfLongByCalculatedNamedKeyword +required_capability: inlinestats_v2 + +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | l:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +; + +maxOfLongByCalculatedDroppedKeyword +required_capability: inlinestats_v2 + +FROM employees +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| KEEP emp_no, avg_worked_seconds, last_name, max_avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | 372660279 + 10074 | 382397583 | Bernatsky | 382397583 + 10044 | 387408356 | Casley | 387408356 + 10030 | 394597613 | Demeyer | 394597613 + 10087 | 305782871 | Eugenio | 305782871 +; + +maxOfLongByEvaledKeyword +required_capability: inlinestats + FROM employees | EVAL l = SUBSTRING(last_name, 0, 1) | KEEP emp_no, avg_worked_seconds, l @@ -79,17 +138,14 @@ FROM employees | WHERE max_avg_worked_seconds == avg_worked_seconds | SORT l ASC | LIMIT 5 -// end::longest-tenured-by-first[] ; -// tag::longest-tenured-by-first-result[] emp_no:integer | avg_worked_seconds:long | l:keyword | max_avg_worked_seconds:long 10065 | 372660279 | A | 372660279 10074 | 382397583 | B | 382397583 10044 | 387408356 | C | 387408356 10030 | 394597613 | D | 394597613 10087 | 305782871 | E | 305782871 -// end::longest-tenured-by-first-result[] ; maxOfLongByInt @@ -499,3 +555,101 @@ emp_no:integer | salary:integer | ninety_fifth_salary:double 10029 | 74999 | 73584.95 10045 | 74970 | 73584.95 ; + +byTwoCalculated +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +byTwoCalculatedSecondOverwrites +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(ST_Y(location), -1) + , x = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT 
(-104.560095200097 19.1480860285854) | -100 | 2 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| EVAL x = ST_X(location) +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(x, -1) + , x = ROUND(x, -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | -100 | 2 +; + + +groupShadowsAgg +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + , lat_10 = ROUND(ST_Y(location), -1) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +groupShadowsField +required_capability: inlinestats_v2 + + FROM employees +| KEEP emp_no, salary, hire_date +| INLINESTATS avg_salary = AVG(salary) + BY hire_date = DATE_TRUNC(1 year, hire_date) +| WHERE salary > avg_salary +| SORT emp_no ASC +| LIMIT 4 +; + +emp_no:integer | salary:integer | hire_date:datetime | avg_salary:double + 10001 | 57305 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10002 | 56371 | 1985-01-01T00:00:00Z | 51831.818181818184 + 10003 | 61805 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10005 | 63528 | 1989-01-01T00:00:00Z | 53487.07692307692 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index fc607edf4d212..3be846630d5b8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1618,6 +1618,37 @@ m:i | o:i | l:i | s:i 1 | 39729 | 1 | 39729 ; +byTwoCalculatedSecondOverwrites +FROM employees +| STATS m = MAX(salary) by l = salary + 1, l = languages + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +FROM employees +| EVAL l = languages +| STATS m = MAX(salary) by l = l + 1, l = l + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + nestedAggsOverGroupingWithAliasAndProjection#[skip:-8.13.99,reason:supported in 8.14] FROM employees | STATS e = length(f) + 1, c = count(*) by f = first_name diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index 6d1d4c7892886..6819727be0131 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -977,7 +977,25 @@ event_duration:long | _index:keyword | ts:date | ts_str:k ; -inlineStatsUnionGroup +inlineStatsUnionGroup-Ignore +required_capability: union_types +required_capability: inlinestats + +FROM sample_data, sample_data_ts_long +| INLINESTATS count = COUNT(*) + BY @timestamp = 
SUBSTRING(TO_STRING(@timestamp), 0, 7) +| SORT client_ip ASC, @timestamp ASC +| LIMIT 4 +; + +client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword | count:long + 172.21.0.5 | 1232382 | Disconnected | 1698068 | 1 + 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 +; + +inlineStatsUnionGroupWithEval-Ignore required_capability: union_types required_capability: inlinestats @@ -993,16 +1011,15 @@ client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 - ; -inlineStatsUnionGroupTogether +inlineStatsUnionGroupTogether-Ignore required_capability: union_types required_capability: inlinestats FROM sample_data, sample_data_ts_long -| EVAL @timestamp = TO_STRING(TO_DATETIME(@timestamp)) -| INLINESTATS count = COUNT(*) BY @timestamp +| INLINESTATS count = COUNT(*) + BY @timestamp = TO_STRING(TO_DATETIME(@timestamp)) | SORT client_ip ASC, @timestamp ASC | LIMIT 4 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index b60701fe19365..8d478408e8781 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -57,6 +57,11 @@ public enum Cap { */ INLINESTATS(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** + * Support for the expressions in grouping in {@code INLINESTATS} syntax. + */ + INLINESTATS_V2(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** * Support for aggregation function {@code TOP}. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 3ffb4acbe6455..5b59117ad356b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -454,7 +454,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { } groupings = newGroupings; if (changed.get()) { - stats = stats.with(newGroupings, stats.aggregates()); + stats = stats.with(stats.child(), newGroupings, stats.aggregates()); changed.set(false); } } @@ -483,7 +483,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { newAggregates.add(agg); } - stats = changed.get() ? stats.with(groupings, newAggregates) : stats; + stats = changed.get() ? 
stats.with(stats.child(), groupings, newAggregates) : stats; } return (LogicalPlan) stats; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e55b090bbb35f..282f46e0de7bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -155,6 +155,7 @@ public LogicalPlan optimize(LogicalPlan verified) { if (failures.hasFailures()) { throw new VerificationException(failures); } + optimized.setOptimized(); return optimized; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 4d3134db34a0d..733fe2e8762bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Row; @@ -99,7 +100,8 @@ protected AttributeSet generates(LogicalPlan logicalPlan) { if (logicalPlan instanceof EsRelation || logicalPlan instanceof LocalRelation || logicalPlan instanceof Row - || logicalPlan instanceof Aggregate) { + || logicalPlan instanceof Aggregate + || logicalPlan instanceof InlineStats) { return logicalPlan.outputSet(); } if (logicalPlan instanceof GeneratingPlan generating) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java index 5592a04e2f813..0f8e0f450e585 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java @@ -11,26 +11,30 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.List; /** - * Rule that removes Aggregate overrides in grouping, aggregates and across them inside. - * The overrides appear when the same alias is used multiple times in aggregations and/or groupings: - * STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10 + * Removes {@link Stats} overrides in grouping, aggregates and across them inside. 
+ * The overrides appear when the same alias is used multiple times in aggregations + * and/or groupings: + * {@code STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} * becomes - * STATS BY x = c + 10 - * That is the last declaration for a given alias, overrides all the other declarations, with - * groups having priority vs aggregates. + * {@code STATS BY x = c + 10} + * and + * {@code INLINESTATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} + * becomes + * {@code INLINESTATS BY x = c + 10} + * This is "last one wins", with groups having priority over aggregates. * Separately, it replaces expressions used as group keys inside the aggregates with references: - * STATS max(a + b + 1) BY a + b + * {@code STATS max(a + b + 1) BY a + b} * becomes - * STATS max($x + 1) BY $x = a + b + * {@code STATS max($x + 1) BY $x = a + b} */ -public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { +public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { @Override protected boolean skipResolved() { @@ -38,19 +42,18 @@ protected boolean skipResolved() { } @Override - protected LogicalPlan rule(Aggregate agg) { - return agg.resolved() ? removeAggDuplicates(agg) : agg; - } - - private static Aggregate removeAggDuplicates(Aggregate agg) { - var groupings = agg.groupings(); - var aggregates = agg.aggregates(); - - groupings = removeDuplicateNames(groupings); - aggregates = removeDuplicateNames(aggregates); - - // replace EsqlAggregate with Aggregate - return new Aggregate(agg.source(), agg.child(), agg.aggregateType(), groupings, aggregates); + protected LogicalPlan rule(LogicalPlan p) { + if (p.resolved() == false) { + return p; + } + if (p instanceof Stats stats) { + return (LogicalPlan) stats.with( + stats.child(), + removeDuplicateNames(stats.groupings()), + removeDuplicateNames(stats.aggregates()) + ); + } + return p; } private static List removeDuplicateNames(List list) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java index 1746931f9a63e..ea0a302f7131d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java @@ -34,7 +34,7 @@ * becomes * stats a1 = sum(a), a2 = min(b) by x | eval a = a1 + a2 | keep a, x * The rule also considers expressions applied over groups: - * stats a = x + 1 by x becomes stats by x | eval a = x + 1 | keep a, x + * {@code STATS a = x + 1 BY x} becomes {@code STATS BY x | EVAL a = x + 1 | KEEP a, x} * And to combine the two: * stats a = x + count(*) by x * becomes diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java index 206bd6d3d1c76..02b39f6babef0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import 
org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.HashMap; @@ -25,15 +25,26 @@ import java.util.Map; /** - * Replace nested expressions inside an aggregate with synthetic eval (which end up being projected away by the aggregate). - * stats sum(a + 1) by x % 2 + * Replace nested expressions inside a {@link Stats} with synthetic eval. + * {@code STATS SUM(a + 1) BY x % 2} * becomes - * eval `a + 1` = a + 1, `x % 2` = x % 2 | stats sum(`a+1`_ref) by `x % 2`_ref + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | STATS SUM(`a+1`_ref) BY `x % 2`_ref} + * and + * {@code INLINESTATS SUM(a + 1) BY x % 2} + * becomes + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | INLINESTATS SUM(`a+1`_ref) BY `x % 2`_ref} */ -public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { +public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { @Override - protected LogicalPlan rule(Aggregate aggregate) { + protected LogicalPlan rule(LogicalPlan p) { + if (p instanceof Stats stats) { + return rule(stats); + } + return p; + } + + private LogicalPlan rule(Stats aggregate) { List evals = new ArrayList<>(); Map evalNames = new HashMap<>(); Map groupingAttributes = new HashMap<>(); @@ -134,10 +145,10 @@ protected LogicalPlan rule(Aggregate aggregate) { var aggregates = aggsChanged.get() ? newAggs : aggregate.aggregates(); var newEval = new Eval(aggregate.source(), aggregate.child(), evals); - aggregate = new Aggregate(aggregate.source(), newEval, aggregate.aggregateType(), groupings, aggregates); + aggregate = aggregate.with(newEval, groupings, aggregates); } - return aggregate; + return (LogicalPlan) aggregate; } static String syntheticName(Expression expression, AggregateFunction af, int counter) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 01132425df11f..5b6fe8c0112c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -108,8 +108,8 @@ public Aggregate replaceChild(LogicalPlan newChild) { } @Override - public Aggregate with(List newGroupings, List newAggregates) { - return new Aggregate(source(), child(), aggregateType(), newGroupings, newAggregates); + public Aggregate with(LogicalPlan child, List newGroupings, List newAggregates) { + return new Aggregate(source(), child, aggregateType(), newGroupings, newAggregates); } public AggregateType aggregateType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index 187b3542e0607..b37976c00ad06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -98,8 +98,8 @@ public InlineStats replaceChild(LogicalPlan newChild) { } @Override - 
public InlineStats with(List newGroupings, List newAggregates) { - return new InlineStats(source(), child(), newGroupings, newAggregates); + public InlineStats with(LogicalPlan child, List newGroupings, List newAggregates) { + return new InlineStats(source(), child, newGroupings, newAggregates); } @Override @@ -121,11 +121,13 @@ public boolean expressionsResolved() { public List output() { if (this.lazyOutput == null) { List addedFields = new ArrayList<>(); - AttributeSet childOutput = child().outputSet(); + AttributeSet set = child().outputSet(); for (NamedExpression agg : aggregates) { - if (childOutput.contains(agg) == false) { + Attribute att = agg.toAttribute(); + if (set.contains(att) == false) { addedFields.add(agg); + set.add(att); } } @@ -207,7 +209,7 @@ private LogicalPlan groupedNextPhase(List schema, List firstPha if (g instanceof Attribute a) { groupingAttributes.add(a); } else { - throw new UnsupportedOperationException("INLINESTATS doesn't support expressions in grouping position yet"); + throw new IllegalStateException("optimized plans should only have attributes in groups, but got [" + g + "]"); } } List leftFields = new ArrayList<>(groupingAttributes.size()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java index ba0f97cdfa30b..6923f9e137eab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java @@ -91,8 +91,8 @@ public interface Phased { * Or {@code null} if there aren't any {@linkplain Phased} operations. */ static LogicalPlan extractFirstPhase(LogicalPlan plan) { - if (false == plan.analyzed()) { - throw new IllegalArgumentException("plan must be analyzed"); + if (false == plan.optimized()) { + throw new IllegalArgumentException("plan must be optimized"); } var firstPhase = new Holder(); plan.forEachUp(t -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java index 35d5229d4e52f..c46c735e7482e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.List; @@ -16,10 +17,25 @@ * STATS-like operations. Like {@link Aggregate} and {@link InlineStats}. */ public interface Stats { + /** + * The user supplied text in the query for this command. + */ + Source source(); + /** * Rebuild this plan with new groupings and new aggregates. */ - Stats with(List newGroupings, List newAggregates); + Stats with(LogicalPlan child, List newGroupings, List newAggregates); + + /** + * Have all the expressions in this plan been resolved? + */ + boolean expressionsResolved(); + + /** + * The operation directly before this one in the plan. + */ + LogicalPlan child(); /** * List containing both the aggregate expressions and grouping expressions. 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index a6bc7befccc80..25d155ccfde07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -125,7 +125,9 @@ public void execute( LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), - listener.delegateFailureAndWrap((next, analyzedPlan) -> executeAnalyzedPlan(request, runPhase, analyzedPlan, next)) + listener.delegateFailureAndWrap( + (next, analyzedPlan) -> executeOptimizedPlan(request, runPhase, optimizedPlan(analyzedPlan), next) + ) ); } @@ -133,17 +135,17 @@ public void execute( * Execute an analyzed plan. Most code should prefer calling {@link #execute} but * this is public for testing. See {@link Phased} for the sequence of operations. */ - public void executeAnalyzedPlan( + public void executeOptimizedPlan( EsqlQueryRequest request, BiConsumer> runPhase, - LogicalPlan analyzedPlan, + LogicalPlan optimizedPlan, ActionListener listener ) { - LogicalPlan firstPhase = Phased.extractFirstPhase(analyzedPlan); + LogicalPlan firstPhase = Phased.extractFirstPhase(optimizedPlan); if (firstPhase == null) { - runPhase.accept(logicalPlanToPhysicalPlan(analyzedPlan, request), listener); + runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); } else { - executePhased(new ArrayList<>(), analyzedPlan, request, firstPhase, runPhase, listener); + executePhased(new ArrayList<>(), optimizedPlan, request, firstPhase, runPhase, listener); } } @@ -155,11 +157,11 @@ private void executePhased( BiConsumer> runPhase, ActionListener listener ) { - PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(firstPhase, request); + PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan(firstPhase), request); runPhase.accept(physicalPlan, listener.delegateFailureAndWrap((next, result) -> { try { profileAccumulator.addAll(result.profiles()); - LogicalPlan newMainPlan = Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages()); + LogicalPlan newMainPlan = optimizedPlan(Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages())); LogicalPlan newFirstPhase = Phased.extractFirstPhase(newMainPlan); if (newFirstPhase == null) { PhysicalPlan finalPhysicalPlan = logicalPlanToPhysicalPlan(newMainPlan, request); @@ -235,7 +237,7 @@ private void preAnalyze(LogicalPlan parsed, BiFunction void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { + private void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one if (preAnalysis.indices.size() > 1) { @@ -352,8 +354,8 @@ private static Set subfields(Set names) { return names.stream().filter(name -> name.endsWith(WILDCARD) == false).map(name -> name + ".*").collect(Collectors.toSet()); } - private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQueryRequest request) { - PhysicalPlan physicalPlan = optimizedPhysicalPlan(logicalPlan); + private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan optimizedPlan, EsqlQueryRequest request) { + 
PhysicalPlan physicalPlan = optimizedPhysicalPlan(optimizedPlan); physicalPlan = physicalPlan.transformUp(FragmentExec.class, f -> { QueryBuilder filter = request.filter(); if (filter != null) { @@ -371,20 +373,25 @@ private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQuer } public LogicalPlan optimizedPlan(LogicalPlan logicalPlan) { - assert logicalPlan.analyzed(); + if (logicalPlan.analyzed() == false) { + throw new IllegalStateException("Expected analyzed plan"); + } var plan = logicalPlanOptimizer.optimize(logicalPlan); LOGGER.debug("Optimized logicalPlan plan:\n{}", plan); return plan; } - public PhysicalPlan physicalPlan(LogicalPlan logicalPlan) { - var plan = mapper.map(optimizedPlan(logicalPlan)); + public PhysicalPlan physicalPlan(LogicalPlan optimizedPlan) { + if (optimizedPlan.optimized() == false) { + throw new IllegalStateException("Expected optimized plan"); + } + var plan = mapper.map(optimizedPlan); LOGGER.debug("Physical plan:\n{}", plan); return plan; } - public PhysicalPlan optimizedPhysicalPlan(LogicalPlan logicalPlan) { - var plan = physicalPlanOptimizer.optimize(physicalPlan(logicalPlan)); + public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { + var plan = physicalPlanOptimizer.optimize(physicalPlan(optimizedPlan)); LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 76e0466af4da0..f30db1bf9bba2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -415,10 +415,10 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { PlainActionFuture listener = new PlainActionFuture<>(); - session.executeAnalyzedPlan( + session.executeOptimizedPlan( new EsqlQueryRequest(), runPhase(bigArrays, physicalOperationProviders), - analyzed, + session.optimizedPlan(analyzed), listener.delegateFailureAndWrap( // Wrap so we can capture the warnings in the calling thread (next, result) -> next.onResponse( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index a294f33ece5c3..74f95e3defbd3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -127,6 +127,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; @@ -4542,6 +4543,31 @@ public void testReplaceSortByExpressionsWithStats() { as(aggregate.child(), EsRelation.class); } + /** + * Expects + * Limit[1000[INTEGER]] + * \_InlineStats[[emp_no % 2{r}#6],[COUNT(salary{f}#12) AS c, emp_no % 2{r}#6]] + * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no % 2]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ */ + public void testInlinestatsNestedExpressionsInGroups() { + var plan = optimizedPlan(""" + FROM test + | INLINESTATS c = COUNT(salary) by emp_no % 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), InlineStats.class); + var groupings = agg.groupings(); + var aggs = agg.aggregates(); + var ref = as(groupings.get(0), ReferenceAttribute.class); + assertThat(aggs.get(1), is(ref)); + var eval = as(agg.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + assertThat(eval.fields().get(0).toAttribute(), is(ref)); + assertThat(eval.fields().get(0).name(), is("emp_no % 2")); + } + /** * Expects * diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java index 9a0f1ba3efe1d..5e45de6c77c42 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java @@ -31,14 +31,14 @@ public class PhasedTests extends ESTestCase { public void testZeroLayers() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - relation.setAnalyzed(); + relation.setOptimized(); assertThat(Phased.extractFirstPhase(relation), nullValue()); } public void testOneLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan orig = new Dummy(Source.synthetic("orig"), relation); - orig.setAnalyzed(); + orig.setOptimized(); assertThat(Phased.extractFirstPhase(orig), sameInstance(relation)); LogicalPlan finalPhase = Phased.applyResultsFromFirstPhase( orig, @@ -49,6 +49,7 @@ public void testOneLayer() { finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) ); + finalPhase.setOptimized(); assertThat(Phased.extractFirstPhase(finalPhase), nullValue()); } @@ -56,7 +57,7 @@ public void testTwoLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan inner = new Dummy(Source.synthetic("inner"), relation); LogicalPlan orig = new Dummy(Source.synthetic("outer"), inner); - orig.setAnalyzed(); + orig.setOptimized(); assertThat( "extractFirstPhase should call #firstPhase on the earliest child in the plan", Phased.extractFirstPhase(orig), @@ -67,6 +68,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + secondPhase.setOptimized(); assertThat( "applyResultsFromFirstPhase should call #nextPhase on the earliest child in the plan", secondPhase, @@ -84,6 +86,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + finalPhase.setOptimized(); assertThat( finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) From f481b0722bf47f03bd00ade814d3347f4555e9d0 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 22 Aug 2024 14:51:19 +0200 Subject: [PATCH 146/389] Always check crsType when folding spatial functions (#112090) * Always check crsType when folding spatial functions * Update docs/changelog/112090.yaml * Only require capability for fixed test The other tests passed on older versions anyway.
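For context, the folding path this fixes is taken when all arguments are literals, so the spatial function is evaluated at plan time rather than per document. A minimal sketch, mirroring the `twoCitiesPointDistanceGeo` csv-spec test added below (the coordinates are arbitrary sample points):

```
ROW p1 = TO_GEOPOINT("POINT(-90.82814 29.79511)"),
    p2 = TO_GEOPOINT("POINT(-90.79731509999999 29.8835389)")
| EVAL d = ST_DISTANCE(p1, p2)
```

The change makes subclasses read the CRS type through the `crsType()` accessor rather than the now-private field, so the GEO-versus-CARTESIAN branch taken in `fold()` always uses a consistently determined type.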
--- docs/changelog/112090.yaml | 6 ++ .../src/main/resources/spatial.csv-spec | 77 +++++++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../scalar/spatial/BinarySpatialFunction.java | 2 +- .../scalar/spatial/SpatialContains.java | 6 +- .../scalar/spatial/SpatialDisjoint.java | 6 +- .../scalar/spatial/SpatialIntersects.java | 6 +- .../scalar/spatial/SpatialWithin.java | 6 +- .../function/scalar/spatial/StDistance.java | 2 +- 9 files changed, 102 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/112090.yaml diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml new file mode 100644 index 0000000000000..6d6e4d0851523 --- /dev/null +++ b/docs/changelog/112090.yaml @@ -0,0 +1,6 @@ +pr: 112090 +summary: Always check `crsType` when folding spatial functions +area: Geo +type: bug +issues: + - 112089 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 02067e9dbe490..35416c7945128 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -487,6 +487,17 @@ POINT (42.97109629958868 14.7552534006536) | 1 ############################################### # Tests for ST_INTERSECTS on GEO_POINT type +literalGeoPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_GEOPOINT("POINT(0 85)"), polygon = TO_GEOSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:geo_point | polygon:geo_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + pointIntersectsLiteralPolygon required_capability: st_intersects @@ -889,6 +900,34 @@ wkt:keyword | pt:geo_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 157249.59498573805 ; +literalGeoPointDistanceOneDegree +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_GEOPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:geo_point | distance:double +"POINT(1 0)" | POINT(1 0) | 111195.07310665186 +"POINT(-1 0)" | POINT(-1 0) | 111195.08242688453 +"POINT(0 1)" | POINT(0 1) | 111195.07776676829 +"POINT(0 -1)" | POINT(0 -1) | 111195.08242688453 +; + +twoCitiesPointDistanceGeo +required_capability: st_distance +required_capability: spatial_functions_fix_crstype_folding + +ROW p1 = TO_GEOPOINT("POINT(-90.82814 29.79511)"), p2 = TO_GEOPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:geo_point | p2:geo_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 10272.529272836206 +; + airportCityLocationPointDistance required_capability: st_distance @@ -1433,6 +1472,17 @@ POINT (726480.0130685265 3359566.331716279) | 849 ############################################### # Tests for ST_INTERSECTS on CARTESIAN_POINT type +literalCartesianPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_CARTESIANPOINT("POINT(0 85)"), polygon = TO_CARTESIANSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:cartesian_point | polygon:cartesian_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + cartesianCentroidFromAirportsAfterIntersectsPredicate required_capability: st_intersects @@ 
-1996,6 +2046,33 @@ wkt:keyword | pt:cartesian_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 1.4142135623730951 ; +literalCartesianPointDistanceOneUnit +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_CARTESIANPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_CARTESIANPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:cartesian_point | distance:double +"POINT(1 0)" | POINT(1 0) | 1.0 +"POINT(-1 0)" | POINT(-1 0) | 1.0 +"POINT(0 1)" | POINT(0 1) | 1.0 +"POINT(0 -1)" | POINT(0 -1) | 1.0 +; + +twoCitiesPointDistanceCartesian +required_capability: st_distance + +ROW p1 = TO_CARTESIANPOINT("POINT(-90.82814 29.79511)"), p2 = TO_CARTESIANPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:cartesian_point | p2:cartesian_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 0.09364744959271905 +; + airportCartesianCityLocationPointDistance required_capability: st_distance diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 8d478408e8781..afa8b6e1d06d7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -135,6 +135,11 @@ public enum Cap { */ ST_DISTANCE, + /** + * Fix determination of CRS types in spatial functions when folding. + */ + SPATIAL_FUNCTIONS_FIX_CRSTYPE_FOLDING, + /** * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input * https://github.com/elastic/elasticsearch/issues/110184 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java index d34ff30d9b87b..84d776888c7ae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -42,7 +42,7 @@ public static List getNamedWriteables() { } private final SpatialTypeResolver spatialTypeResolver; - protected SpatialCrsType crsType; + private SpatialCrsType crsType; protected final boolean leftDocValues; protected final boolean rightDocValues; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index afa2ba833dcd1..6cb3c34ba8b1f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -176,10 +176,10 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); Geometry rightGeom = makeGeometryFromLiteral(right()); - Component2D[] components = asLuceneComponent2Ds(crsType, 
rightGeom); - return (crsType == SpatialCrsType.GEO) + Component2D[] components = asLuceneComponent2Ds(crsType(), rightGeom); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometries(docValueReader, components) : CARTESIAN.geometryRelatesGeometries(docValueReader, components); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index 9e37bf4c8fa51..d04dc9e1a6b07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index b7aaededf76f5..48e99989c5699 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -129,9 +129,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? 
GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index 297a6b40c2175..c204468ae17d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java index 1fdd4241aa222..14bded51aa55f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java @@ -173,7 +173,7 @@ protected NodeInfo info() { public Object fold() { var leftGeom = makeGeometryFromLiteral(left()); var rightGeom = makeGeometryFromLiteral(right()); - return (crsType == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); + return (crsType() == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); } @Override From 615e0846178ca92d19c5561cd737a2fa1fe2929b Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 22 Aug 2024 14:13:56 +0100 Subject: [PATCH 147/389] Add more cross-links about sniff/proxy modes (#112079) The info about remote cluster connection modes is a little disjointed. This commit adds some cross-links between the sections to help users find more relevant information. --- .../cluster/remote-clusters-settings.asciidoc | 14 ++++- .../modules/remote-clusters.asciidoc | 55 +++++++++++-------- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index 2308ec259da48..537783ef6ff01 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -6,7 +6,10 @@ mode are described separately. `cluster.remote..mode`:: The mode used for a remote cluster connection. The only supported modes are - `sniff` and `proxy`. + `sniff` and `proxy`. The default is `sniff`. See <> for + further information about these modes, and <> + and <> for further information about their + settings. 
`cluster.remote.initial_connect_timeout`:: @@ -97,6 +100,11 @@ you configure the remotes. [[remote-cluster-sniff-settings]] ==== Sniff mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: sniff` and then configure the following +settings. You may also leave `cluster.remote..mode` unset since +`sniff` is the default mode. + `cluster.remote..seeds`:: The list of seed nodes used to sniff the remote cluster state. @@ -117,6 +125,10 @@ you configure the remotes. [[remote-cluster-proxy-settings]] ==== Proxy mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: proxy` and then configure the following +settings. + `cluster.remote..proxy_address`:: The address used for all remote connections. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 510ceb6ddb013..ca1c507aa4ed9 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -1,7 +1,7 @@ [[remote-clusters]] == Remote clusters You can connect a local cluster to other {es} clusters, known as _remote -clusters_. Remote clusters can be located in different datacenters or +clusters_. Remote clusters can be located in different datacenters or geographic regions, and contain indices or data streams that can be replicated with {ccr} or searched by a local cluster using {ccs}. @@ -30,9 +30,9 @@ capabilities, the local and remote cluster must be on the same [discrete] === Add remote clusters -NOTE: The instructions that follow describe how to create a remote connection from a -self-managed cluster. You can also set up {ccs} and {ccr} from an -link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] +NOTE: The instructions that follow describe how to create a remote connection from a +self-managed cluster. You can also set up {ccs} and {ccr} from an +link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] or from an link:https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html[{ece} deployment]. To add remote clusters, you can choose between @@ -52,7 +52,7 @@ controls. <>. Certificate based security model:: Uses mutual TLS authentication for cross-cluster operations. User authentication -is performed on the local cluster and a user's role names are passed to the +is performed on the local cluster and a user's role names are passed to the remote cluster. In this model, a superuser on the local cluster gains total read access to the remote cluster, so it is only suitable for clusters that are in the same security domain. <>. @@ -63,15 +63,17 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is registered with a name of your choosing and a list -of addresses of _seed_ nodes. When you register a remote cluster using sniff -mode, {es} retrieves from one of the seed nodes the addresses of up to three -_gateway nodes_. Each `remote_cluster_client` node in the local {es} cluster -then opens several TCP connections to the publish addresses of the gateway -nodes. This mode therefore requires that the gateway nodes' publish addresses -are accessible to nodes in the local cluster. +In sniff mode, a cluster alias is registered with a name of your choosing and a +list of addresses of _seed_ nodes specified with the +`cluster.remote..seeds` setting. 
When you register a remote +cluster using sniff mode, {es} retrieves from one of the seed nodes the +addresses of up to three _gateway nodes_. Each `remote_cluster_client` node in +the local {es} cluster then opens several TCP connections to the publish +addresses of the gateway nodes. This mode therefore requires that the gateway +nodes' publish addresses are accessible to nodes in the local cluster. + -Sniff mode is the default connection mode. +Sniff mode is the default connection mode. See <> +for more information about configuring sniff mode. + [[gateway-nodes-selection]] The _gateway nodes_ selection depends on the following criteria: @@ -86,18 +88,23 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is registered with a name of your choosing and the -address of a TCP (layer 4) reverse proxy which you must configure to route -connections to the nodes of the remote cluster. When you register a remote -cluster using proxy mode, {es} opens several TCP connections to the proxy -address and uses these connections to communicate with the remote cluster. In -proxy mode {es} disregards the publish addresses of the remote cluster nodes -which means that the publish addresses of the remote cluster nodes need not be -accessible to the local cluster. +In proxy mode, a cluster alias is registered with a name of your choosing and +the address of a TCP (layer 4) reverse proxy specified with the +`cluster.remote..proxy_address` setting. You must configure this +proxy to route connections to one or more nodes of the remote cluster. When you +register a remote cluster using proxy mode, {es} opens several TCP connections +to the proxy address and uses these connections to communicate with the remote +cluster. In proxy mode {es} disregards the publish addresses of the remote +cluster nodes which means that the publish addresses of the remote cluster +nodes need not be accessible to the local cluster. + -Proxy mode is not the default connection mode, so you must configure it -explicitly if desired. Proxy mode has the same <> as sniff mode. +Proxy mode is not the default connection mode, so you must set +`cluster.remote..mode: proxy` to use it. See +<> for more information about configuring proxy +mode. ++ +Proxy mode has the same <> as sniff mode. 
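+
+As a concrete illustration of the two modes, here is a minimal sketch of registering one remote cluster per mode through the cluster settings API. The aliases `cluster_one` and `cluster_two` and both addresses are placeholders for this illustration, not defaults:
+
+[source,console]
+----
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster": {
+      "remote": {
+        "cluster_one": {
+          "mode": "sniff",
+          "seeds": ["10.0.0.1:9300"]
+        },
+        "cluster_two": {
+          "mode": "proxy",
+          "proxy_address": "10.0.0.2:9400"
+        }
+      }
+    }
+  }
+}
+----
+// TEST[skip:illustrative example only, placeholder addresses]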
include::cluster/remote-clusters-api-key.asciidoc[]

From de14c1d3e3e96de0f899236cded9bb410d0b6cce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com>
Date: Thu, 22 Aug 2024 16:08:31 +0200
Subject: [PATCH 148/389] Fix testSuggestProfilesWithHint (#112010)

---
 .../elasticsearch/xpack/security/profile/ProfileIntegTests.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java
index 963c42c55aa60..d057b7ce0be20 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java
@@ -451,7 +451,7 @@ public void testSuggestProfilesWithHint() throws IOException {
         final List spaces = List.of("space1", "space2", "space3", "space4", "*");
         final List profiles = spaces.stream().map(space -> {
             final PlainActionFuture future1 = new PlainActionFuture<>();
-            final String lastName = randomAlphaOfLengthBetween(3, 8);
+            final String lastName = randomAlphaOfLengthBetween(3, 8) + space;
             final Authentication.RealmRef realmRef = randomBoolean()
                 ? AuthenticationTestHelper.randomRealmRef(false)
                 : new Authentication.RealmRef(

From f37440f441c5a05d45f68c45faa6558b8690eb17 Mon Sep 17 00:00:00 2001
From: Stef Nestor <26751266+stefnestor@users.noreply.github.com>
Date: Thu, 22 Aug 2024 08:16:36 -0600
Subject: [PATCH 149/389] (Doc+) Allocation Explain Examples: THROTTLED,
 MAX_RETRY (#111558)

Adds [Allocation Explain examples](https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html#cluster-allocation-explain-api-examples)
for `THROTTLED` and `MAX_RETRY`. Also formats the sub TOC so that we can later
link code messages to those docs.

---
 .../cluster/allocation-explain.asciidoc | 101 +++++++++++++++++-
 1 file changed, 100 insertions(+), 1 deletion(-)

diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc
index 809c9d74f1450..7547dd74c5ecd 100644
--- a/docs/reference/cluster/allocation-explain.asciidoc
+++ b/docs/reference/cluster/allocation-explain.asciidoc
@@ -81,6 +81,7 @@ you might expect otherwise.
 ===== Unassigned primary shard
 
+====== Conflicting settings
 
 The following request gets an allocation explanation for an unassigned primary
 shard.
@@ -158,6 +159,56 @@ node.
 <5> The decider which led to the `no` decision for the node.
 <6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate.
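+
+One way out of such a conflict is to correct or remove the over-restrictive filter. As an illustrative sketch only (assuming the conflicting filter from the example above is `index.routing.allocation.include._name` on `my-index-000001`; adjust both names to your case), setting the filter to `null` removes it:
+
+[source,console]
+----
+PUT my-index-000001/_settings
+{
+  "index.routing.allocation.include._name": null
+}
+----
+// TEST[skip:illustrative example only]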
+====== Maximum number of retries exceeded
+
+The following response contains an allocation explanation for an unassigned
+primary shard that has reached the maximum number of allocation retry attempts.
+
+[source,js]
+----
+{
+  "index" : "my-index-000001",
+  "shard" : 0,
+  "primary" : true,
+  "current_state" : "unassigned",
+  "unassigned_info" : {
+    "at" : "2017-01-04T18:03:28.464Z",
+    "details" : "failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException",
+    "reason": "ALLOCATION_FAILED",
+    "failed_allocation_attempts": 5,
+    "last_allocation_status": "no"
+  },
+  "can_allocate": "no",
+  "allocate_explanation": "cannot allocate because allocation is not permitted to any of the nodes",
+  "node_allocation_decisions" : [
+    {
+      "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+      "node_name" : "node_t0",
+      "transport_address" : "127.0.0.1:9400",
+      "roles" : ["data_content", "data_hot"],
+      "node_decision" : "no",
+      "store" : {
+        "matching_size" : "4.2kb",
+        "matching_size_in_bytes" : 4325
+      },
+      "deciders" : [
+        {
+          "decider": "max_retry",
+          "decision" : "NO",
+          "explanation": "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2024-07-30T21:04:12.166Z], failed_attempts[5], failed_nodes[[mEKjwwzLT1yJVb8UxT6anw]], delayed=false, details[failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException], allocation_status[deciders_no]]]"
+        }
+      ]
+    }
+  ]
+}
+----
+// NOTCONSOLE
+
+If the decider message indicates a transient allocation issue, use
+<> to retry allocation.
+
+====== No valid shard copy
+
 The following response contains an allocation explanation for an unassigned
 primary shard that was previously allocated.
@@ -184,6 +235,8 @@ TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy`
 ===== Unassigned replica shard
 
+====== Allocation delayed
+
 The following response contains an allocation explanation for a replica that's
 unassigned due to <>.
@@ -241,8 +294,52 @@ unassigned due to <>.
 <2> The remaining delay before allocating the replica shard.
 <3> Information about the shard data found on a node.
 
+====== Allocation throttled
+
+The following response contains an allocation explanation for a replica that's
+queued to allocate but currently waiting on other queued shards.
+
+[source,js]
+----
+{
+  "index" : "my-index-000001",
+  "shard" : 0,
+  "primary" : false,
+  "current_state" : "unassigned",
+  "unassigned_info" : {
+    "reason" : "NODE_LEFT",
+    "at" : "2017-01-04T18:53:59.498Z",
+    "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]",
+    "last_allocation_status" : "no_attempt"
+  },
+  "can_allocate": "throttled",
+  "allocate_explanation": "Elasticsearch is currently busy with other activities. It expects to be able to allocate this shard when those activities finish. Please wait.",
+  "node_allocation_decisions" : [
+    {
+      "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+      "node_name" : "node_t0",
+      "transport_address" : "127.0.0.1:9400",
+      "roles" : ["data_content", "data_hot"],
+      "node_decision" : "no",
+      "deciders" : [
+        {
+          "decider": "throttling",
+          "decision": "THROTTLE",
+          "explanation": "reached the limit of incoming shard recoveries [2], cluster setting [cluster.routing.allocation.node_concurrent_incoming_recoveries=2] (can also be set via [cluster.routing.allocation.node_concurrent_recoveries])"
+        }
+      ]
+    }
+  ]
+}
+----
+// NOTCONSOLE
+
+This is a transient message that might appear when a large number of shards are allocating.
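+
+If the throttling persists and the cluster has recovery capacity to spare, one option is to raise the concurrency limit named in the decider message. The following is a sketch only, not a recommendation for a specific value (`4` is an arbitrary illustration; the setting shown is the combined knob mentioned in the explanation above):
+
+[source,console]
+----
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster.routing.allocation.node_concurrent_recoveries": 4
+  }
+}
+----
+// TEST[skip:illustrative example only]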
 
 ===== Assigned shard
 
+====== Cannot remain on current node
+
 The following response contains an allocation explanation for an assigned shard.
 The response indicates the shard is not allowed to remain on its current node
 and must be reallocated.
@@ -295,6 +392,8 @@ and must be reallocated.
 <2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node.
 <3> Whether the shard is allowed to be allocated to another node.
 
+====== Must remain on current node
+
 The following response contains an allocation explanation for a shard that must
 remain on its current node. Moving the shard to another node would not improve
 cluster balance.
@@ -338,7 +437,7 @@ cluster balance.
 ===== No arguments
 
 If you call the API with no arguments, {es} retrieves an allocation explanation
-for an arbitrary unassigned primary or replica shard.
+for an arbitrary unassigned primary or replica shard, returning any unassigned primary shards first.
 
 [source,console]
 ----

From 6d076dfa17438c02fdd4e912549ff26e57334c72 Mon Sep 17 00:00:00 2001
From: Andrei Stefan
Date: Thu, 22 Aug 2024 17:20:47 +0300
Subject: [PATCH 150/389] ESQL: fix for missing indices error message (#111797)

Reverts a part of https://github.com/elastic/elasticsearch/pull/109483 by
going back to the previous (more restrictive) way of dealing with missing
indices or aliases. More specifically, if an index pattern used in a query
refers to a missing index or alias name and doesn't use a wildcard for this
name, then we error out. Our lack of testing in this area made the change in
https://github.com/elastic/elasticsearch/pull/109483 invisible.

Fixes https://github.com/elastic/elasticsearch/issues/111712

---
 docs/changelog/111797.yaml | 6 +
 .../xpack/esql/EsqlSecurityIT.java | 103 +++++++++--
 .../xpack/esql/ccq/Clusters.java | 8 +-
 .../xpack/esql/ccq/EsqlRestValidationIT.java | 81 +++++++++
 .../qa/multi_node/EsqlRestValidationIT.java | 27 +++
 .../qa/single_node/EsqlRestValidationIT.java | 27 +++
 .../qa/rest/EsqlRestValidationTestCase.java | 170 ++++++++++++++++++
 .../xpack/esql/plugin/ComputeService.java | 9 +-
 .../RemoteClusterSecurityEsqlIT.java | 83 ++++++---
 9 files changed, 469 insertions(+), 45 deletions(-)
 create mode 100644 docs/changelog/111797.yaml
 create mode 100644 x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java
 create mode 100644 x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java
 create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java
 create mode 100644 x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java

diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml
new file mode 100644
index 0000000000000..00b793a19d9c3
--- /dev/null
+++ b/docs/changelog/111797.yaml
@@ -0,0 +1,6 @@
+pr: 111797
+summary: "ESQL: fix for missing indices error message"
+area: ES|QL
+type: bug
+issues:
+ - 111712

diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
index e661ad1e742c9..2b162b4f18ead 100644
--- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
+++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java
@@ -18,6 +18,7 @@ import
org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -160,7 +161,7 @@ public void testAllowedIndices() throws Exception { .entry("values", List.of(List.of(72.0d))); assertMap(entityAsMap(resp), matcher); } - for (var index : List.of("index-user2", "index-user1,index-user2", "index-user*", "index*")) { + for (var index : List.of("index-user2", "index-user*", "index*")) { Response resp = runESQLCommand("metadata1_read2", "from " + index + " | stats sum=sum(value)"); assertOK(resp); MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double"))) @@ -170,7 +171,7 @@ public void testAllowedIndices() throws Exception { } public void testAliases() throws Exception { - for (var index : List.of("second-alias", "second-alias,index-user2", "second-*", "second-*,index*")) { + for (var index : List.of("second-alias", "second-*", "second-*,index*")) { Response resp = runESQLCommand( "alias_user2", "from " + index + " METADATA _index" + "| stats sum=sum(value), index=VALUES(_index)" @@ -185,7 +186,7 @@ public void testAliases() throws Exception { } public void testAliasFilter() throws Exception { - for (var index : List.of("first-alias", "first-alias,index-user1", "first-alias,index-*", "first-*,index-*")) { + for (var index : List.of("first-alias", "first-alias,index-*", "first-*,index-*")) { Response resp = runESQLCommand("alias_user1", "from " + index + " METADATA _index" + "| KEEP _index, org, value | LIMIT 10"); assertOK(resp); MapMatcher matcher = responseMatcher().entry( @@ -221,19 +222,97 @@ public void testInsufficientPrivilege() { assertThat(error.getMessage(), containsString("Unknown index [index-user1]")); } + public void testIndexPatternErrorMessageComparison_ESQL_SearchDSL() throws Exception { + // _search match_all query on the index-user1,index-user2 index pattern + XContentBuilder json = JsonXContent.contentBuilder(); + json.startObject(); + json.field("query", QueryBuilders.matchAllQuery()); + json.endObject(); + Request searchRequest = new Request("GET", "/index-user1,index-user2/_search"); + searchRequest.setJsonEntity(Strings.toString(json)); + searchRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "metadata1_read2")); + + // ES|QL query on the same index pattern + var esqlResp = expectThrows(ResponseException.class, () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2")); + var srchResp = expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)); + + for (ResponseException r : List.of(esqlResp, srchResp)) { + assertThat( + EntityUtils.toString(r.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + } + assertThat(esqlResp.getResponse().getStatusLine().getStatusCode(), equalTo(srchResp.getResponse().getStatusLine().getStatusCode())); + } + public void testLimitedPrivilege() throws Exception { - Response resp = runESQLCommand("metadata1_read2", """ - FROM index-user1,index-user2 METADATA _index - | STATS sum=sum(value), index=VALUES(_index) - """); - assertOK(resp); - Map respMap = 
entityAsMap(resp); + ResponseException resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "metadata1_read2", + "FROM index-user1,index-user2 METADATA _index | STATS sum=sum(value), index=VALUES(_index)" + ) + ); assertThat( - respMap.get("columns"), - equalTo(List.of(Map.of("name", "sum", "type", "double"), Map.of("name", "index", "type", "keyword"))) + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 METADATA _index | STATS index=VALUES(_index)") ); - assertThat(respMap.get("values"), equalTo(List.of(List.of(72.0, "index-user2")))); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 | STATS sum=sum(value)") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("alias_user1", "FROM first-alias,index-user1 METADATA _index | KEEP _index, org, value | LIMIT 10") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user1] with effective roles [alias_user1] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "alias_user2", + "from second-alias,index-user2 METADATA _index | stats sum=sum(value), index=VALUES(_index)" + ) + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user2] with effective roles [alias_user2] on indices [index-user2]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); } public void testDocumentLevelSecurity() throws Exception { diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java index f20d758132cbb..fa8cb49c59aed 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java @@ -12,9 +12,13 @@ import org.elasticsearch.test.cluster.util.Version; public class Clusters { + + static final String REMOTE_CLUSTER_NAME = "remote_cluster"; + static final String LOCAL_CLUSTER_NAME = 
"local_cluster"; + public static ElasticsearchCluster remoteCluster() { return ElasticsearchCluster.local() - .name("remote_cluster") + .name(REMOTE_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.fromString(System.getProperty("tests.old_cluster_version"))) .nodes(2) @@ -28,7 +32,7 @@ public static ElasticsearchCluster remoteCluster() { public static ElasticsearchCluster localCluster(ElasticsearchCluster remoteCluster) { return ElasticsearchCluster.local() - .name("local_cluster") + .name(LOCAL_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.CURRENT) .nodes(2) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..21307c5362417 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.AfterClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.StringJoiner; + +import static org.elasticsearch.xpack.esql.ccq.Clusters.REMOTE_CLUSTER_NAME; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + private static RestClient remoteClient; + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + @AfterClass + public static void closeRemoteClients() throws IOException { + try { + IOUtils.close(remoteClient); + } finally { + remoteClient = null; + } + } + + @Override + protected String clusterSpecificIndexName(String pattern) { + StringJoiner sj = new StringJoiner(","); + for (String index : pattern.split(",")) { + sj.add(remoteClusterIndex(index)); + } + return sj.toString(); + } + + private static String remoteClusterIndex(String indexName) { + return REMOTE_CLUSTER_NAME + ":" + indexName; + } + + @Override + protected RestClient provisioningClient() throws IOException { + return remoteClusterClient(); + } + + @Override + protected RestClient provisioningAdminClient() throws IOException { + return remoteClusterClient(); + } + + private RestClient remoteClusterClient() throws IOException { + if (remoteClient == null) { + var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); + remoteClient = 
buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); + } + return remoteClient; + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..0187bafe19fce --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..5a31fc722eec1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java new file mode 100644 index 0000000000000..9ec4f60f4c843 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public abstract class EsqlRestValidationTestCase extends ESRestTestCase { + + private static final String indexName = "test_esql"; + private static final String aliasName = "alias-test_esql"; + protected static final String[] existentIndexWithWildcard = new String[] { + indexName + ",inexistent*", + indexName + "*,inexistent*", + "inexistent*," + indexName }; + private static final String[] existentIndexWithoutWildcard = new String[] { indexName + ",inexistent", "inexistent," + indexName }; + protected static final String[] existentAliasWithWildcard = new String[] { + aliasName + ",inexistent*", + aliasName + "*,inexistent*", + "inexistent*," + aliasName }; + private static final String[] existentAliasWithoutWildcard = new String[] { aliasName + ",inexistent", "inexistent," + aliasName }; + private static final String[] inexistentIndexNameWithWildcard = new String[] { "inexistent*", "inexistent1*,inexistent2*" }; + private static final String[] inexistentIndexNameWithoutWildcard = new String[] { "inexistent", "inexistent1,inexistent2" }; + private static final String createAlias = "{\"actions\":[{\"add\":{\"index\":\"" + indexName + "\",\"alias\":\"" + aliasName + "\"}}]}"; + private static final String removeAlias = "{\"actions\":[{\"remove\":{\"index\":\"" + + indexName + + "\",\"alias\":\"" + + aliasName + + "\"}}]}"; + + @Before + @After + public void assertRequestBreakerEmpty() throws Exception { + EsqlSpecTestCase.assertRequestBreakerEmpty(); + } + + @Before + public void prepareIndices() throws IOException { + if (provisioningClient().performRequest(new Request("HEAD", "/" + indexName)).getStatusLine().getStatusCode() == 404) { + var request = new Request("PUT", "/" + indexName); + request.setJsonEntity("{\"mappings\": {\"properties\": {\"foo\":{\"type\":\"keyword\"}}}}"); + provisioningClient().performRequest(request); + } + assertOK(provisioningAdminClient().performRequest(new Request("POST", "/" + indexName + "/_refresh"))); + } + + @After + public void wipeTestData() throws IOException { + try { + var response = provisioningAdminClient().performRequest(new Request("DELETE", "/" + indexName)); + assertEquals(200, response.getStatusLine().getStatusCode()); + } catch (ResponseException re) { + assertEquals(404, re.getResponse().getStatusLine().getStatusCode()); + } + } + + private String getInexistentIndexErrorMessage() { + return "\"reason\" : \"Found 1 problem\\nline 1:1: Unknown index "; + } + + public void testInexistentIndexNameWithWildcard() throws IOException { + assertErrorMessages(inexistentIndexNameWithWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testInexistentIndexNameWithoutWildcard() throws IOException { + 
assertErrorMessages(inexistentIndexNameWithoutWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testExistentIndexWithoutWildcard() throws IOException { + for (String indexName : existentIndexWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + } + + public void testExistentIndexWithWildcard() throws IOException { + assertValidRequestOnIndices(existentIndexWithWildcard); + } + + public void testAlias() throws IOException { + createAlias(); + + for (String indexName : existentAliasWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + assertValidRequestOnIndices(existentAliasWithWildcard); + + deleteAlias(); + } + + private void assertErrorMessages(String[] indices, String errorMessage, int statusCode) throws IOException { + for (String indexName : indices) { + assertErrorMessage(indexName, errorMessage + "[" + clusterSpecificIndexName(indexName) + "]", statusCode); + } + } + + protected String clusterSpecificIndexName(String indexName) { + return indexName; + } + + private void assertErrorMessage(String indexName, String errorMessage, int statusCode) throws IOException { + var specificName = clusterSpecificIndexName(indexName); + final var request = createRequest(specificName); + ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(request)); + + assertThat(exc.getResponse().getStatusLine().getStatusCode(), equalTo(statusCode)); + assertThat(exc.getMessage(), containsString(errorMessage)); + } + + private Request createRequest(String indexName) throws IOException { + final var request = new Request("POST", "/_query"); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + request.setJsonEntity( + Strings.toString(JsonXContent.contentBuilder().startObject().field("query", "from " + indexName).endObject()) + ); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + request.setOptions(options); + return request; + } + + private void assertValidRequestOnIndices(String[] indices) throws IOException { + for (String indexName : indices) { + final var request = createRequest(clusterSpecificIndexName(indexName)); + Response response = client().performRequest(request); + assertOK(response); + } + } + + // Returned client is used to load the test data, either in the local cluster or a remote one (for + // multi-clusters). 
The client()/adminClient() will always connect to the local cluster + protected RestClient provisioningClient() throws IOException { + return client(); + } + + protected RestClient provisioningAdminClient() throws IOException { + return adminClient(); + } + + private void createAlias() throws IOException { + var r = new Request("POST", "_aliases"); + r.setJsonEntity(createAlias); + assertOK(provisioningClient().performRequest(r)); + } + + private void deleteAlias() throws IOException { + var r = new Request("POST", "/_aliases/"); + r.setJsonEntity(removeAlias); + assertOK(provisioningAdminClient().performRequest(r)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 29d524fc664a8..fa8a5693c59bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -11,11 +11,11 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -68,7 +68,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.Configuration; -import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.Result; import java.util.ArrayList; @@ -98,8 +97,6 @@ public class ComputeService { private final EnrichLookupService enrichLookupService; private final ClusterService clusterService; - private static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndexResolver.FIELD_CAPS_INDICES_OPTIONS; - public ComputeService( SearchService searchService, TransportService transportService, @@ -152,7 +149,7 @@ public void execute( return; } Map clusterToConcreteIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); if (dataNodePlan == null) { if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) { @@ -188,7 +185,7 @@ public void execute( } } Map clusterToOriginalIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); final var exchangeSource = new 
ExchangeSourceHandler( diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 262e1340fb465..f5f9410a145cc 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.junit.RunnableTestRuleAdapter; @@ -347,21 +348,6 @@ public void testCrossClusterQuery() throws Exception { | LIMIT 10""")); assertRemoteAndLocalResults(response); - // query remote cluster only - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2 - | SORT emp_id ASC - | LIMIT 2 - | KEEP emp_id, department""")); - assertRemoteOnlyResults(response); // same as above since the user only has access to employees - - // query remote and local cluster - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 - | SORT emp_id ASC - | LIMIT 10""")); - assertRemoteAndLocalResults(response); // same as above since the user only has access to employees - // update role to include both employees and employees2 for the remote cluster final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); putRoleRequest.setJsonEntity(""" @@ -618,6 +604,37 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception { + "this action is granted by the index privileges [read,read_cross_cluster,all]" ) ); + + // query remote cluster only - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2 + | SORT emp_id ASC + | LIMIT 2 + | KEEP emp_id, department""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user [remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); + + // query remote and local cluster - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 + | SORT emp_id ASC + | LIMIT 10""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user 
[remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); } @SuppressWarnings("unchecked") @@ -841,7 +858,7 @@ public void testAlias() throws Exception { }"""); assertOK(adminClient().performRequest(putRoleRequest)); // query `employees2` - for (String index : List.of("*:employees2", "*:employee*", "*:employee*,*:alias-employees,*:employees3")) { + for (String index : List.of("*:employees2", "*:employee*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); Response response = performRequestWithRemoteSearchUser(request); assertOK(response); @@ -849,15 +866,7 @@ public void testAlias() throws Exception { List ids = (List) responseAsMap.get("values"); assertThat(ids, equalTo(List.of(List.of("11"), List.of("13")))); } - // query `alias-engineering` - for (var index : List.of("*:alias*", "*:alias*", "*:alias*,my*:employees1", "*:alias*,my*:employees3")) { - Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); - Response response = performRequestWithRemoteSearchUser(request); - assertOK(response); - Map responseAsMap = entityAsMap(response); - List ids = (List) responseAsMap.get("values"); - assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); - } + // query `employees2` and `alias-engineering` for (var index : List.of("*:employees2,*:alias-engineering", "*:emp*,*:alias-engineering", "*:emp*,my*:alias*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); @@ -874,6 +883,30 @@ public void testAlias() throws Exception { assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); assertThat(error.getMessage(), containsString(" Unknown index [" + index + "]")); } + + for (var index : List.of( + Tuple.tuple("*:employee*,*:alias-employees,*:employees3", "alias-employees,employees3"), + Tuple.tuple("*:alias*,my*:employees1", "employees1"), + Tuple.tuple("*:alias*,my*:employees3", "employees3") + )) { + Request request = esqlRequest("FROM " + index.v1() + " | KEEP emp_id | SORT emp_id | LIMIT 100"); + ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(request)); + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString("unauthorized for user [remote_search_user] with assigned roles [remote_search]") + ); + assertThat(error.getMessage(), containsString("user [test_user] on indices [" + index.v2() + "]")); + } + + // query `alias-engineering` + Request request = esqlRequest("FROM *:alias* | KEEP emp_id | SORT emp_id | LIMIT 100"); + Response response = performRequestWithRemoteSearchUser(request); + assertOK(response); + Map responseAsMap = entityAsMap(response); + List ids = (List) responseAsMap.get("values"); + assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); + removeAliases(); } From ed60470518131a26f387df32915448b40098db48 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 22 Aug 2024 17:42:49 +0300 Subject: [PATCH 151/389] Display effective retention in the relevant data stream APIs (#112019) --- docs/changelog/112019.yaml | 5 + .../lifecycle/apis/get-lifecycle.asciidoc | 8 +- .../data-streams/lifecycle/index.asciidoc | 18 +- ...rial-manage-data-stream-retention.asciidoc | 215 ++++++++++++++++++ .../tutorial-manage-new-data-stream.asciidoc | 13 +- ...grate-data-stream-from-ilm-to-dsl.asciidoc | 38 ++-- 
.../DataStreamGlobalRetentionIT.java | 190 ++++++++++++++++ .../RestExplainDataStreamLifecycleAction.java | 7 + .../RestGetDataStreamLifecycleAction.java | 7 + .../rest/RestGetDataStreamsAction.java | 7 + .../lifecycle/40_effective_retention.yml | 104 +++++++++ .../datastreams/GetDataStreamAction.java | 2 +- .../ExplainDataStreamLifecycleAction.java | 2 +- .../GetDataStreamLifecycleAction.java | 2 +- .../cluster/metadata/DataStreamLifecycle.java | 14 +- .../metadata/DataStreamLifecycleTests.java | 21 +- 16 files changed, 595 insertions(+), 58 deletions(-) create mode 100644 docs/changelog/112019.yaml create mode 100644 docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc create mode 100644 modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java create mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml new file mode 100644 index 0000000000000..7afb207864ed7 --- /dev/null +++ b/docs/changelog/112019.yaml @@ -0,0 +1,5 @@ +pr: 112019 +summary: Display effective retention in the relevant data stream APIs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index c83572a4e0795..6bac1c7f7cc75 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -128,14 +128,18 @@ The response will look like the following: "name": "my-data-stream-1", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } }, { "name": "my-data-stream-2", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } } ] diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 16ccf2ef82391..e4d5acfb704d3 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -14,10 +14,11 @@ To achieve that, it supports: * Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. -{es} is allowed at a later time to delete data older than this time period. +{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level +or on a global level. Read more about the different options in this <>. A data stream lifecycle also supports downsampling the data stream backing indices. -See <> for +See <> for more details. [discrete] @@ -33,16 +34,17 @@ each data stream and performs the following steps: 3. After an index is not the write index anymore (i.e. the data stream has been rolled over), automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. 
As the segments are organised -into tiers of exponential sizes, merging the long tail of small segments is only a +into tiers of exponential sizes, merging the long tail of small segments is only a fraction of the cost of force merging to a single segment. The small segments would usually hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most likely to keep being queried. -4. If <> is configured it will execute +4. If <> is configured it will execute all the configured downsampling rounds. 5. Applies retention to the remaining backing indices. This means deleting the backing indices whose -`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing -indices and it is either the time since the backing index got rolled over, or the time optionally configured in the -<> setting. +`generation_time` is longer than the effective retention period (read more about the +<>). The `generation_time` is only applicable to rolled +over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured +in the <> setting. IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but @@ -75,4 +77,6 @@ include::tutorial-manage-new-data-stream.asciidoc[] include::tutorial-manage-existing-data-stream.asciidoc[] +include::tutorial-manage-data-stream-retention.asciidoc[] + include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[] diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc new file mode 100644 index 0000000000000..83a587c250e73 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc @@ -0,0 +1,215 @@ +[role="xpack"] +[[tutorial-manage-data-stream-retention]] +=== Tutorial: Data stream retention + +In this tutorial, we are going to go over the data stream lifecycle retention; we will define it, go over how it can be configured +and how it can gets applied. Keep in mind, the following options apply only to data streams that are managed by the data stream lifecycle. + +. <> +. <> +. <> +. <> + +You can verify if a data steam is managed by the data stream lifecycle via the <>: + +//// +[source,console] +---- +PUT /_index_template/template +{ + "index_patterns": ["my-data-stream*"], + "template": { + "lifecycle": {} + }, + "data_stream": { } +} + +PUT /_data_stream/my-data-stream +---- +// TESTSETUP +//// + +//// +[source,console] +---- +DELETE /_data_stream/my-data-stream* +DELETE /_index_template/template +PUT /_cluster/settings +{ + "persistent" : { + "data_streams.lifecycle.retention.*" : null + } +} +---- +// TEARDOWN +//// + +[source,console] +-------------------------------------------------- +GET _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- + +The result should look like this: + +[source,console-result] +-------------------------------------------------- +{ + "data_streams": [ + { + "name": "my-data-stream", <1> + "lifecycle": { + "enabled": true <2> + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of your data stream. 
+<2> Ensure that the lifecycle is enabled, meaning this should be `true`.
+
+[discrete]
+[[what-is-retention]]
+==== What is data stream retention?
+
+We define retention as the least amount of time the data of a data stream are going to be kept in {es}. After this time period
+has passed, {es} is allowed to remove these data to free up space and/or manage costs.
+
+NOTE: Retention does not define when the data will be removed, but the minimum time period they will be kept.
+
+We define 4 different types of retention:
+
+* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be
+set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data
+need to be kept forever.
+* The global default retention, let's call it `default_retention`, which is a retention configured via the cluster setting
+<> and will be
+applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively,
+it ensures that there will be no data streams keeping their data forever. This can be set via the
+<>.
+* The global max retention, let's call it `max_retention`, which is a retention configured via the cluster setting
+<> and will be applied to
+all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention
+will exceed this time period. This can be set via the <>.
+* The effective retention, or `effective_retention`, which is the retention applied to a data stream at a given moment.
+Effective retention cannot be set; it is derived by taking into account all the configured retention listed above and is
+calculated as it is described <>.
+
+[discrete]
+[[retention-configuration]]
+==== How to configure retention?
+
+- By setting the `data_retention` on the data stream level. This retention can be configured in two ways:
++
+-- For new data streams, it can be defined in the index template that would be applied during the data stream's creation.
+You can use the <>, for example:
++
+[source,console]
+--------------------------------------------------
+PUT _index_template/template
+{
+  "index_patterns": ["my-data-stream*"],
+  "data_stream": { },
+  "priority": 500,
+  "template": {
+    "lifecycle": {
+      "data_retention": "7d"
+    }
+  },
+  "_meta": {
+    "description": "Template with data stream lifecycle"
+  }
+}
+--------------------------------------------------
+-- For an existing data stream, it can be set via the <>.
++
+[source,console]
+----
+PUT _data_stream/my-data-stream/_lifecycle
+{
+  "data_retention": "30d" <1>
+}
+----
+// TEST[continued]
+<1> The retention period of this data stream is set to 30 days.
+
+- By setting the global retention via the `data_streams.lifecycle.retention.default` and/or `data_streams.lifecycle.retention.max`
+that are set on a cluster level. They can be set via the <>. For example:
++
+[source,console]
+--------------------------------------------------
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "data_streams.lifecycle.retention.default" : "7d",
+    "data_streams.lifecycle.retention.max" : "90d"
+  }
+}
+--------------------------------------------------
+// TEST[continued]
+
+[discrete]
+[[effective-retention-calculation]]
+==== How is the effective retention calculated?
+The effective retention is calculated in the following way:
+
+- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not
+have `data_retention`.
+- The `effective_retention` is the `data_retention`, when `data_retention` is defined and, if `max_retention` is defined,
+it is less than the `max_retention`.
+- The `effective_retention` is the `max_retention`, when `max_retention` is defined, and the data stream has either no
+`data_retention` or its `data_retention` is greater than the `max_retention`.
+
+The above is demonstrated in the examples below:
+
+|===
+|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by
+
+|Not set |Not set |Not set |Infinite |N/A
+|Not relevant |12 months |**30 days** |30 days |`data_retention`
+|Not relevant |Not set |**30 days** |30 days |`data_retention`
+|**30 days** |12 months |Not set |30 days |`default_retention`
+|**30 days** |30 days |Not set |30 days |`default_retention`
+|Not relevant |**30 days** |12 months |30 days |`max_retention`
+|Not set |**30 days** |Not set |30 days |`max_retention`
+|===
+
+Considering our example, if we retrieve the lifecycle of `my-data-stream`:
+[source,console]
+----
+GET _data_stream/my-data-stream/_lifecycle
+----
+// TEST[continued]
+
+We see that it remains the same as what the user configured:
+[source,console-result]
+----
+{
+  "data_streams": [
+    {
+      "name": "my-data-stream",
+      "lifecycle": {
+        "enabled": true,
+        "data_retention": "30d",
+        "effective_retention": "30d",
+        "retention_determined_by": "data_stream_configuration"
+      }
+    }
+  ]
+}
+----
+
+[discrete]
+[[effective-retention-application]]
+==== How is the effective retention applied?
+
+Retention is applied to the remaining backing indices of a data stream as the last step of
+<>. Data stream lifecycle will retrieve the backing indices
+whose `generation_time` is longer than the effective retention period and delete them. The `generation_time` is only
+applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time
+optionally configured in the <> setting.
+
+IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
+index have passed the retention period. As a result, the retention period is not the exact time data get deleted, but
+the minimum time data will be stored.
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
index c34340a096046..01d51cdde3167 100644
--- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
@@ -91,10 +91,12 @@ The result will look like this:
 {
   "data_streams": [
     {
-      "name": "my-data-stream",<1>
+      "name": "my-data-stream", <1>
       "lifecycle": {
-        "enabled": true, <2>
-        "data_retention": "7d" <3>
+        "enabled": true, <2>
+        "data_retention": "7d", <3>
+        "effective_retention": "7d", <4>
+        "retention_determined_by": "data_stream_configuration"
       }
     }
   ]
@@ -102,8 +104,9 @@ The result will look like this:
 --------------------------------------------------
 <1> The name of your data stream.
 <2> Shows if the data stream lifecycle is enabled for this data stream.
-<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will -be kept at least for 7 days. After that {es} can delete it at its own discretion. +<3> The retention period of the data indexed in this data stream, as configured by the user. +<4> The retention period that will be applied by the data stream lifecycle. This means that the data in this data stream will + be kept at least for 7 days. After that {es} can delete it at its own discretion. If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the <>: diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 8d959d8f4ad84..a2c12466b7f2b 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle In this tutorial we'll look at migrating an existing data stream from <> to -<>. The existing {ilm-init} managed backing indices will continue +<>. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-init} to +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle -can co-manage a data stream; however, an index can only be managed by one system at +can co-manage a data stream; however, an index can only be managed by one system at a time. [discrete] @@ -17,7 +17,7 @@ a time. To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ data stream using the <>. @@ -174,8 +174,8 @@ in the index template). To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> -to `false`, and to configure data stream lifecycle. +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. @@ -209,9 +209,9 @@ PUT _index_template/dsl-data-stream-template // TEST[continued] <1> The `prefer_ilm` setting will now be configured on the **new** backing indices -(created by rolling over the data stream) such that {ilm-init} does _not_ take +(created by rolling over the data stream) such that {ilm-init} does _not_ take precedence over data stream lifecycle. 
-<2> We're configuring the data stream lifecycle so _new_ data streams will be +<2> We're configuring the data stream lifecycle so _new_ data streams will be managed by data stream lifecycle. We've now made sure that new data streams will be managed by data stream lifecycle. @@ -227,7 +227,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle ---- // TEST[continued] -We can inspect the data stream to check that the next generation will indeed be +We can inspect the data stream to check that the next generation will indeed be managed by data stream lifecycle: [source,console] @@ -266,7 +266,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> @@ -292,7 +294,7 @@ GET _data_stream/dsl-data-stream <4> The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. -We'll now rollover the data stream to see the new generation index being managed by +We'll now rollover the data stream to see the new generation index being managed by data stream lifecycle: [source,console] @@ -344,7 +346,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", @@ -375,9 +379,9 @@ in the index template [discrete] [[migrate-from-dsl-to-ilm]] ==== Migrate data stream back to ILM -We can easily change this data stream to be managed by {ilm-init} because we didn't remove -the {ilm-init} policy when we <>. +We can easily change this data stream to be managed by {ilm-init} because we didn't remove +the {ilm-init} policy when we <>. We can achieve this in two ways: diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java new file mode 100644 index 0000000000000..514eb6d8742ea --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+package org.elasticsearch.datastreams.lifecycle;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.WarningFailureException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DataStreamGlobalRetentionIT extends DisabledSecurityDataStreamTestCase {
+
+    @Before
+    public void setup() throws IOException {
+        updateClusterSettings(
+            Settings.builder()
+                .put("data_streams.lifecycle.poll_interval", "1s")
+                .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1")
+                .build()
+        );
+        // Create a template with the default lifecycle
+        Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1");
+        putComposableIndexTemplateRequest.setJsonEntity("""
+            {
+              "index_patterns": ["my-data-stream*"],
+              "data_stream": {},
+              "template": {
+                "lifecycle": {}
+              }
+            }
+            """);
+        assertOK(client().performRequest(putComposableIndexTemplateRequest));
+
+        // Create a data stream with one doc
+        Request createDocRequest = new Request("POST", "/my-data-stream/_doc?refresh=true");
+        createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}");
+        assertOK(client().performRequest(createDocRequest));
+    }
+
+    @After
+    public void cleanUp() throws IOException {
+        adminClient().performRequest(new Request("DELETE", "_data_stream/*"));
+        updateClusterSettings(
+            Settings.builder().putNull("data_streams.lifecycle.retention.default").putNull("data_streams.lifecycle.retention.max").build()
+        );
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testDataStreamRetention() throws Exception {
+        // Set global retention (default and max) and add retention to the data stream
+        {
+            updateClusterSettings(
+                Settings.builder()
+                    .put("data_streams.lifecycle.retention.default", "7d")
+                    .put("data_streams.lifecycle.retention.max", "90d")
+                    .build()
+            );
+            Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle");
+            request.setJsonEntity("""
+                {
+                  "data_retention": "10s"
+                }""");
+            assertAcknowledged(client().performRequest(request));
+        }
+
+        // Verify that the effective retention matches the data stream retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map<String, Object> lifecycle = (Map<String, Object>) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("data_stream_configuration"));
+            assertThat(lifecycle.get("data_retention"), is("10s"));
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map<String, Object> dataStream = ((List<Map<String, Object>>) entityAsMap(response).get("data_streams")).get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            List<Object> backingIndices = (List<Object>) dataStream.get("indices");
+            assertThat(backingIndices.size(), is(1));
+            // 2 backing indices created + 1 for the deleted index
+            assertThat(dataStream.get("generation"), is(3));
+        }, 20, TimeUnit.SECONDS);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testDefaultRetention() throws Exception {
+        // Set default global retention
+        updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.default", "10s").build());
+
+        // Verify that the effective retention matches the default retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map<String, Object> lifecycle = (Map<String, Object>) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("default_global_retention"));
+            assertThat(lifecycle.get("data_retention"), nullValue());
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map<String, Object> dataStream = ((List<Map<String, Object>>) entityAsMap(response).get("data_streams")).get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            List<Object> backingIndices = (List<Object>) dataStream.get("indices");
+            assertThat(backingIndices.size(), is(1));
+            // 2 backing indices created + 1 for the deleted index
+            assertThat(dataStream.get("generation"), is(3));
+        }, 20, TimeUnit.SECONDS);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testMaxRetention() throws Exception {
+        // Set max global retention
+        updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.max", "10s").build());
+        boolean withDataStreamLevelRetention = randomBoolean();
+        if (withDataStreamLevelRetention) {
+            try {
+                Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle");
+                request.setJsonEntity("""
+                    {
+                      "data_retention": "30d"
+                    }""");
+                assertAcknowledged(client().performRequest(request));
+                fail("Should have returned a warning about data retention exceeding the max retention");
+            } catch (WarningFailureException warningFailureException) {
+                assertThat(
+                    warningFailureException.getMessage(),
+                    containsString("The retention provided [30d] is exceeding the max allowed data retention of this project [10s]")
+                );
+            }
+        }
+
+        // Verify that the effective retention matches the max retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List<Object> dataStreams = (List<Object>) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map<String, Object> lifecycle = (Map<String, Object>) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("max_global_retention"));
+            if (withDataStreamLevelRetention) {
+                assertThat(lifecycle.get("data_retention"), is("30d"));
+            } else {
+                assertThat(lifecycle.get("data_retention"), nullValue());
+            }
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map<String, Object> dataStream = ((List<Map<String, Object>>) entityAsMap(response).get("data_streams")).get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            List<Object> backingIndices = (List<Object>) dataStream.get("indices");
+            assertThat(backingIndices.size(), is(1));
+            // 2 backing indices created + 1 for the deleted index
+            assertThat(dataStream.get("generation"), is(3));
+        }, 20, TimeUnit.SECONDS);
+    }
+}
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java
index f44e59d0278c3..82350130e57af 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
@@ -19,6 +20,7 @@
 import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;

 import java.util.List;
+import java.util.Set;

 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
@@ -56,4 +58,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
     public boolean allowSystemIndexAccessByDefault() {
         return true;
     }
+
+    @Override
+    public Set<String> supportedCapabilities() {
+        return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY);
+    }
 }
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java
index 94724f6778013..00f9d4da88301 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
@@ -19,6 +20,7 @@
 import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;

 import java.util.List;
+import java.util.Set;

 import static org.elasticsearch.rest.RestRequest.Method.GET;

@@ -54,4 +56,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
     public boolean allowSystemIndexAccessByDefault() {
         return true;
     }
+
+    @Override
+    public Set<String> supportedCapabilities() {
+        return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY);
+    }
 }
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java
index
5acb59841d6a6..c3178208d51c2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -50,4 +52,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml new file mode 100644 index 0000000000000..ef36f283fe237 --- /dev/null +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml @@ -0,0 +1,104 @@ +setup: + - requires: + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was released as tech preview in 8.11" + test_runner_features: allowed_warnings + - do: + allowed_warnings: + - "index template [template-with-lifecycle] has index patterns [managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation" + indices.put_index_template: + name: template-with-lifecycle + body: + index_patterns: [ managed-data-stream ] + template: + settings: + index.number_of_replicas: 0 + lifecycle: + data_retention: "30d" + data_stream: { } + - do: + indices.create_data_stream: + name: managed-data-stream +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: null + data_streams.lifecycle.retention.default: null + +--- +"Retrieve effective retention via the data stream API": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index} + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.data_retention: '30d' } + - match: { data_streams.0.lifecycle.effective_retention: '30d'} + - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration'} + +--- +"Retrieve effective retention with explain": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /{index}/_lifecycle/explain + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: "7d" + - is_true: acknowledged + 
- do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - set: + data_streams.0.indices.0.index_name: backing_index + + - do: + indices.explain_data_lifecycle: + index: managed-data-stream + include_defaults: true + - match: { indices.$backing_index.managed_by_lifecycle: true } + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } + - match: { indices.$backing_index.lifecycle.effective_retention: '7d' } + - match: { indices.$backing_index.lifecycle.retention_determined_by: 'max_global_retention' } + +--- +"Retrieve effective retention with data stream lifecycle": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index}/_lifecycle + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.put_data_lifecycle: + name: "managed-data-stream" + body: {} + - is_true: acknowledged + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.default: "7d" + - do: + indices.get_data_lifecycle: + name: "managed-data-stream" + - length: { data_streams: 1} + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.effective_retention: '7d' } + - match: { data_streams.0.lifecycle.retention_determined_by: 'default_global_retention' } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 89282b8db3646..2fcc5ce3702c1 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -556,7 +556,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (DataStreamInfo dataStream : dataStreams) { dataStream.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 4dc9ada5dc01f..d51f00681bb5e 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -217,7 +217,7 @@ public Iterator toXContentChunked(ToXContent.Params outerP builder.field(explainIndexDataLifecycle.getIndex()); explainIndexDataLifecycle.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + DataStreamLifecycle.addEffectiveRetentionParams(outerParams), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index e038763169ef8..39427efbac4fd 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -187,7 +187,7 @@ public XContentBuilder toXContent( 
builder.field(LIFECYCLE_FIELD.getPreferredName()); lifecycle.toXContent( builder, - org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + org.elasticsearch.cluster.metadata.DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, isSystemDataStream ? null : globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index de9d615022975..cb09fb6108049 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,7 +24,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -55,6 +54,7 @@ public class DataStreamLifecycle implements SimpleDiffable, // Versions over the wire public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X; + public static final String EFFECTIVE_RETENTION_REST_API_CAPABILITY = "data_stream_lifecycle_effective_retention"; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; // The following XContent params are used to enrich the DataStreamLifecycle json with effective retention information @@ -367,14 +367,12 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE } /** - * Adds a retention param to signal that this serialisation should include the effective retention metadata + * Adds a retention param to signal that this serialisation should include the effective retention metadata. + * @param params the XContent params to be extended with the new flag + * @return XContent params with `include_effective_retention` set to true. If the flag exists it will override it. 
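+     * <p>Illustrative usage sketch, shown here as a reading aid and mirroring the call
+     * sites updated in this commit (e.g. {@code GetDataStreamLifecycleAction}):
+     * {@code lifecycle.toXContent(builder, DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention)}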
*/ - public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { - boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); - return new DelegatingMapParams( - Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), - params - ); + public static ToXContent.Params addEffectiveRetentionParams(ToXContent.Params params) { + return new DelegatingMapParams(INCLUDE_EFFECTIVE_RETENTION_PARAMS, params); } public static Builder newBuilder(DataStreamLifecycle lifecycle) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index 50ab76ed794d8..a6ced9185dbad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -39,7 +39,6 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -348,21 +347,11 @@ public void testEffectiveRetention() { } public void testEffectiveRetentionParams() { - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + Map initialParams = randomMap(0, 10, () -> Tuple.tuple(randomAlphaOfLength(10), randomAlphaOfLength(10))); + ToXContent.Params params = DataStreamLifecycle.addEffectiveRetentionParams(new ToXContent.MapParams(initialParams)); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + for (String key : initialParams.keySet()) { + assertThat(initialParams.get(key), equalTo(params.param(key))); } } From 0f176e1779b2869f8cb2b788aae5509bbbcf3725 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 22 Aug 2024 07:57:15 -0700 Subject: [PATCH 152/389] Remove leftover libsystemd references (#112078) Systemd notification now happens by directly communicating with the systemd socket. This commit removes the native access to libsystemd, which is no longer used. 
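For reference, direct socket notification looks roughly like the sketch below. This is
illustrative only, not the implementation in this repository: it assumes the Linux values
`AF_UNIX = 1` and `SOCK_DGRAM = 2`, the glibc `sockaddr_un` layout, and the JDK foreign
function API (final in JDK 22, preview in JDK 21, which is how the `main21` sources below
already use `java.lang.foreign`). Calling libc directly is necessary because, as of this
writing, the JDK's `DatagramChannel` does not support Unix-domain datagram sockets.

```java
// Hypothetical sketch: notify systemd readiness by writing "READY=1" to the
// datagram socket named by $NOTIFY_SOCKET, calling libc directly (no libsystemd).
import java.lang.foreign.Arena;
import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.Linker;
import java.lang.foreign.MemorySegment;
import java.lang.invoke.MethodHandle;
import java.nio.charset.StandardCharsets;

import static java.lang.foreign.ValueLayout.ADDRESS;
import static java.lang.foreign.ValueLayout.JAVA_BYTE;
import static java.lang.foreign.ValueLayout.JAVA_INT;
import static java.lang.foreign.ValueLayout.JAVA_LONG;
import static java.lang.foreign.ValueLayout.JAVA_SHORT;

class SdNotifySketch {
    private static final Linker LINKER = Linker.nativeLinker();
    private static final MethodHandle SOCKET = LINKER.downcallHandle(
        LINKER.defaultLookup().find("socket").orElseThrow(),
        FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_INT)
    );
    private static final MethodHandle SENDTO = LINKER.downcallHandle(
        LINKER.defaultLookup().find("sendto").orElseThrow(),
        FunctionDescriptor.of(JAVA_LONG, JAVA_INT, ADDRESS, JAVA_LONG, JAVA_INT, ADDRESS, JAVA_INT)
    );
    private static final MethodHandle CLOSE = LINKER.downcallHandle(
        LINKER.defaultLookup().find("close").orElseThrow(),
        FunctionDescriptor.of(JAVA_INT, JAVA_INT)
    );

    private static final int AF_UNIX = 1;    // assumption: Linux value
    private static final int SOCK_DGRAM = 2; // assumption: Linux value

    static void notifyReady() throws Throwable {
        String notifySocket = System.getenv("NOTIFY_SOCKET");
        if (notifySocket == null) {
            return; // not started with Type=notify, nothing to do
        }
        byte[] path = notifySocket.getBytes(StandardCharsets.UTF_8);
        if (path.length > 107) {
            return; // would not fit in sockaddr_un.sun_path
        }
        int fd = (int) SOCKET.invokeExact(AF_UNIX, SOCK_DGRAM, 0);
        if (fd < 0) {
            return;
        }
        try (Arena arena = Arena.ofConfined()) {
            // struct sockaddr_un { sa_family_t sun_family; char sun_path[108]; };
            // (abstract-namespace sockets, where the path starts with '@', are ignored here)
            MemorySegment addr = arena.allocate(110);
            addr.set(JAVA_SHORT, 0, (short) AF_UNIX);
            MemorySegment.copy(path, 0, addr, JAVA_BYTE, 2, path.length);

            byte[] state = "READY=1".getBytes(StandardCharsets.UTF_8);
            MemorySegment payload = arena.allocate(state.length);
            MemorySegment.copy(state, 0, payload, JAVA_BYTE, 0, state.length);

            long sent = (long) SENDTO.invokeExact(fd, payload, (long) state.length, 0, addr, 2 + path.length + 1);
            assert sent == state.length : "sd_notify write failed";
        } finally {
            int ignored = (int) CLOSE.invokeExact(fd);
        }
    }
}
```

The deleted `JdkSystemdLibrary` below shows the old approach: locating `libsystemd.so.0`
on `java.library.path` and downcalling `sd_notify(3)` through the same FFM machinery.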
--- .../jna/JnaNativeLibraryProvider.java | 3 - .../nativeaccess/jna/JnaSystemdLibrary.java | 31 ----- .../nativeaccess/lib/NativeLibrary.java | 4 +- .../nativeaccess/lib/SystemdLibrary.java | 13 -- .../jdk/JdkNativeLibraryProvider.java | 3 - .../nativeaccess/jdk/JdkSystemdLibrary.java | 111 ------------------ 6 files changed, 2 insertions(+), 163 deletions(-) delete mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java delete mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java delete mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 454581ae70b51..79caf04c97246 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -15,7 +15,6 @@ import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -38,8 +37,6 @@ public JnaNativeLibraryProvider() { JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, - SystemdLibrary.class, - JnaSystemdLibrary::new, ZstdLibrary.class, JnaZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java deleted file mode 100644 index f06361e8807c5..0000000000000 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.jna; - -import com.sun.jna.Library; -import com.sun.jna.Native; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -class JnaSystemdLibrary implements SystemdLibrary { - private interface NativeFunctions extends Library { - int sd_notify(int unset_environment, String state); - } - - private final NativeFunctions functions; - - JnaSystemdLibrary() { - this.functions = Native.load("libsystemd.so.0", NativeFunctions.class); - } - - @Override - public int sd_notify(int unset_environment, String state) { - return functions.sd_notify(unset_environment, state); - } -} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index faa0e861dc63f..cdd0a56c52a90 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,5 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, - VectorLibrary, ZstdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, VectorLibrary, + ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java deleted file mode 100644 index 3c4ffefb6e41f..0000000000000 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.lib; - -public non-sealed interface SystemdLibrary extends NativeLibrary { - int sd_notify(int unset_environment, String state); -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index cbd43a394379b..1ac7d6c6f897d 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -14,7 +14,6 @@ import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -36,8 +35,6 @@ public JdkNativeLibraryProvider() { JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, - SystemdLibrary.class, - JdkSystemdLibrary::new, ZstdLibrary.class, JdkZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java deleted file mode 100644 index c34c8c070edc5..0000000000000 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.nativeaccess.jdk; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.lang.foreign.Arena; -import java.lang.foreign.FunctionDescriptor; -import java.lang.foreign.MemorySegment; -import java.lang.invoke.MethodHandle; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static java.lang.foreign.ValueLayout.ADDRESS; -import static java.lang.foreign.ValueLayout.JAVA_INT; -import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; - -class JdkSystemdLibrary implements SystemdLibrary { - - static { - // Find and load libsystemd. We attempt all instances of - // libsystemd in case of multiarch systems, and stop when - // one is successfully loaded. If none can be loaded, - // UnsatisfiedLinkError will be thrown. 
- List paths = findLibSystemd(); - if (paths.isEmpty()) { - String libpath = System.getProperty("java.library.path"); - throw new UnsatisfiedLinkError("Could not find libsystemd in java.library.path: " + libpath); - } - UnsatisfiedLinkError last = null; - for (String path : paths) { - try { - System.load(path); - last = null; - break; - } catch (UnsatisfiedLinkError e) { - last = e; - } - } - if (last != null) { - throw last; - } - } - - // findLibSystemd returns a list of paths to instances of libsystemd - // found within java.library.path. - static List findLibSystemd() { - // Note: on some systems libsystemd does not have a non-versioned symlink. - // System.loadLibrary only knows how to find non-versioned library files, - // so we must manually check the library path to find what we need. - final Path libsystemd = Paths.get("libsystemd.so.0"); - final String libpath = System.getProperty("java.library.path"); - final List foundPaths = new ArrayList<>(); - Arrays.stream(libpath.split(":")).map(Paths::get).filter(Files::exists).forEach(rootPath -> { - try { - Files.walkFileTree(rootPath, new SimpleFileVisitor<>() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { - if (Files.isReadable(dir)) { - return FileVisitResult.CONTINUE; - } - return FileVisitResult.SKIP_SUBTREE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { - if (file.getFileName().equals(libsystemd)) { - foundPaths.add(file.toAbsolutePath().toString()); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFileFailed(Path file, IOException exc) { - return FileVisitResult.CONTINUE; - } - }); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - return foundPaths; - } - - private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); - - @Override - public int sd_notify(int unset_environment, String state) { - try (Arena arena = Arena.ofConfined()) { - MemorySegment nativeState = MemorySegmentUtil.allocateString(arena, state); - return (int) sd_notify$mh.invokeExact(unset_environment, nativeState); - } catch (Throwable t) { - throw new AssertionError(t); - } - } -} From 322d319a83a140c4af3e151598351ee06f1d7496 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 01:39:25 +1000 Subject: [PATCH 153/389] Mute org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT testLimitedPrivilege #112110 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cd484b1c46867..581cbe6bd6025 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,6 +182,9 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAfterMissingIndex issue: https://github.com/elastic/elasticsearch/issues/112088 +- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT + method: testLimitedPrivilege + issue: https://github.com/elastic/elasticsearch/issues/112110 # Examples: # From 5534a1f8565f0be86930bd196df5b2e3d94f4eb3 Mon Sep 17 00:00:00 2001 From: Ankita Kumar Date: Thu, 22 Aug 2024 12:34:45 -0400 Subject: [PATCH 154/389] Fix Test Failure in SplitIndexIT (#112070) This PR fixes the testSplitIndexPrimaryTerm() test inside SplitIndexIT. 
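The fix itself is the single added setting visible in the diff below. As a hedged
illustration of the idea, this is roughly how the source index settings are assembled,
with rebalancing disabled so the cluster cannot move shards while the test samples
primary terms (`numberOfShards` and `numberOfTargetShards` are the test's own variables):

```java
// Sketch of the stabilizing setting introduced by the fix shown below:
// build the source index settings with shard rebalancing disabled, so
// concurrent shard movement cannot race the primary-term assertions.
Settings.Builder sourceSettings = Settings.builder()
    .put(indexSettings())                                          // the test's base settings
    .put("number_of_shards", numberOfShards)
    .put("index.number_of_routing_shards", numberOfTargetShards)
    .put("index.routing.rebalance.enable", EnableAllocationDecider.Rebalance.NONE);
```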
Fixes #111282 --- muted-tests.yml | 3 --- .../action/admin/indices/create/SplitIndexIT.java | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 581cbe6bd6025..fe2bb3d37dcd6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -85,9 +85,6 @@ tests: - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT method: testAuthenticateWithImplicitFlow issue: https://github.com/elastic/elasticsearch/issues/111191 -- class: org.elasticsearch.action.admin.indices.create.SplitIndexIT - method: testSplitIndexPrimaryTerm - issue: https://github.com/elastic/elasticsearch/issues/111282 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT issue: https://github.com/elastic/elasticsearch/issues/111319 - class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 27fd54c39cc95..22549a1562dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -276,6 +276,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards) + .put("index.routing.rebalance.enable", EnableAllocationDecider.Rebalance.NONE) ).get(); ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards From 16a12ff43163f3d2f56febe5c57414b0bc313943 Mon Sep 17 00:00:00 2001 From: Vishal Raj Date: Thu, 22 Aug 2024 19:03:32 +0100 Subject: [PATCH 155/389] Revert "[plugin/apm-data] Set fallback to legacy ILM policies" (#112112) * Revert "[plugin/apm-data] Set fallback to legacy ILM policies (#112028)" This reverts commit fd37ef88c28744181d4628a05baed57098884bd9. 
--- .../resources/index-templates/logs-apm.app@template.yaml | 3 --- .../resources/index-templates/logs-apm.error@template.yaml | 3 --- .../resources/index-templates/metrics-apm.app@template.yaml | 3 --- .../index-templates/metrics-apm.internal@template.yaml | 3 --- .../metrics-apm.service_destination.10m@template.yaml | 3 --- .../metrics-apm.service_destination.1m@template.yaml | 3 --- .../metrics-apm.service_destination.60m@template.yaml | 3 --- .../metrics-apm.service_summary.10m@template.yaml | 3 --- .../metrics-apm.service_summary.1m@template.yaml | 3 --- .../metrics-apm.service_summary.60m@template.yaml | 3 --- .../metrics-apm.service_transaction.10m@template.yaml | 3 --- .../metrics-apm.service_transaction.1m@template.yaml | 3 --- .../metrics-apm.service_transaction.60m@template.yaml | 3 --- .../metrics-apm.transaction.10m@template.yaml | 3 --- .../index-templates/metrics-apm.transaction.1m@template.yaml | 3 --- .../metrics-apm.transaction.60m@template.yaml | 3 --- .../resources/index-templates/traces-apm.rum@template.yaml | 3 --- .../index-templates/traces-apm.sampled@template.yaml | 5 ----- .../main/resources/index-templates/traces-apm@template.yaml | 3 --- x-pack/plugin/apm-data/src/main/resources/resources.yaml | 2 +- 20 files changed, 1 insertion(+), 60 deletions(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index f74f1aa2e900e..21cad50f3fe90 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -23,6 +23,3 @@ template: index: default_pipeline: logs-apm.app@default-pipeline final_pipeline: apm@pipeline - lifecycle: - name: logs-apm.app_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 0ab9f01a76c5c..2cfa7b454722f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -30,6 +30,3 @@ template: index: default_pipeline: logs-apm.error@default-pipeline final_pipeline: apm@pipeline - lifecycle: - name: logs-apm.error_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index 5659a5c2cbd55..a3c7ab7c05193 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -24,6 +24,3 @@ template: index: default_pipeline: metrics-apm.app@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.app_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 8e5fca051aaeb..4c7df377a6cfa 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -25,9 +25,6 @@ template: index: default_pipeline: metrics-apm.internal@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.internal_metrics-default_policy - prefer_ilm: false mappings: properties: data_stream.dataset: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 23db583d3a30f..63c9ff9c3b988 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 4cbeb5053d072..6995a2d09b12e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index d29f953cb73a1..b39d0beca3740 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index 57f63b9ed7dcc..8d92b21866bb8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index 6b8e604e3f03e..de19df330aa0e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index 1c16e20a34f51..002676eb08cc1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index db85407599f67..549af3942dcd3 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 9e3220b2c4c3a..9bdacfc337663 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index c10435b2b50a6..8bcbeb53c74fe 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 92c6a430a377d..68c1dc0f31c1e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 78ed0959f270f..6065f6e12f999 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index 3625ecfc1458b..d8889ceb63f87 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index 53647284d2b91..d299481ff6e21 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -25,9 +25,6 @@ template: index: default_pipeline: traces-apm.rum@default-pipeline final_pipeline: traces-apm@pipeline - lifecycle: - name: traces-apm.rum_traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml index 9cffe241e0979..81457e2f204cb 100644 --- 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -20,11 +20,6 @@ ignore_missing_component_templates: template: lifecycle: data_retention: 1h - settings: - index: - lifecycle: - name: traces-apm.sampled_traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index bcf406faa71da..fda953171b793 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml @@ -24,9 +24,6 @@ template: index: default_pipeline: traces-apm@default-pipeline final_pipeline: traces-apm@pipeline - lifecycle: - name: traces-apm.traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index cd2111ffb9f83..3e66769d939ad 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 7 +version: 8 component-templates: # Data lifecycle. From d802e6fd11114a2a425d6dd8023c3a8b17144513 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 22 Aug 2024 16:04:32 -0400 Subject: [PATCH 156/389] Test infra: Catch and report errors building error (#112109) This modifies our `matchesMap` and `matchesList` infrastructure to report when it encounters an error building the description of an error. This looks something like: ``` a map containing foo: expected "val" but error describing bar: ``` This preserves the original error message while also giving you the context for the actual failure. 
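For illustration, a minimal sketch of the failure mode this guards against (the map contents and the deliberately broken matcher below are hypothetical, but they use the same `matchesMap().entry(...)` / `assertMap(...)` API exercised by the tests in this change):

```
import static org.elasticsearch.test.MapMatcher.assertMap;
import static org.elasticsearch.test.MapMatcher.matchesMap;

import java.util.Map;

import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;

public class BrokenDescriptionExample {
    public static void main(String[] args) {
        // A matcher that never matches and whose describeTo throws, simulating
        // a bug in a matcher's description-building code.
        TypeSafeMatcher<Object> broken = new TypeSafeMatcher<>() {
            @Override
            public void describeTo(Description description) {
                throw new IllegalStateException("intentional failure");
            }

            @Override
            protected boolean matchesSafely(Object o) {
                return false;
            }
        };
        // Before this change the IllegalStateException escaped and hid the real
        // mismatch; now the AssertionError message still walks the whole map and
        // reads roughly `foo: expected error describing ...` with the stack
        // trace of the describeTo failure appended.
        assertMap(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", broken).entry("bar", 2));
    }
}
```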
Relates to #112039 Relates to #112049 --- muted-tests.yml | 6 --- .../org/elasticsearch/test/MapMatcher.java | 21 ++++++--- .../elasticsearch/test/MapMatcherTests.java | 43 +++++++++++++++++++ 3 files changed, 58 insertions(+), 12 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index fe2bb3d37dcd6..e23763ba2cdc0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -167,12 +167,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT method: testScaledFloat issue: https://github.com/elastic/elasticsearch/issues/112003 -- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT - method: testForceSleepsProfile {SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112039 -- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT - method: testForceSleepsProfile {ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112049 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} issue: https://github.com/elastic/elasticsearch/issues/111999 diff --git a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java index 7a788eaacc6d4..b702809de5bed 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java @@ -14,6 +14,8 @@ import org.hamcrest.StringDescription; import org.hamcrest.TypeSafeMatcher; +import java.io.PrintWriter; +import java.io.StringWriter; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -292,17 +294,24 @@ static void describeEntryUnexepectedButOk(Object value, Description description) } static void describeEntryValue(int keyWidth, Matcher matcher, Object v, Description description) { - if (v instanceof Map && matcher instanceof MapMatcher) { - ((MapMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (Map) v, description); + if (v instanceof Map && matcher instanceof MapMatcher mm) { + mm.describePotentialMismatch(keyWidth + INDENT, (Map) v, description); return; } - if (v instanceof List && matcher instanceof ListMatcher) { - ((ListMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (List) v, description); + if (v instanceof List && matcher instanceof ListMatcher lm) { + lm.describePotentialMismatch(keyWidth + INDENT, (List) v, description); return; } if (false == matcher.matches(v)) { - description.appendText("expected ").appendDescriptionOf(matcher).appendText(" but "); - matcher.describeMismatch(v, description); + try { + description.appendText("expected ").appendDescriptionOf(matcher).appendText(" but "); + matcher.describeMismatch(v, description); + } catch (Exception e) { + description.appendText("error describing "); + StringWriter trace = new StringWriter(); + e.printStackTrace(new PrintWriter(trace)); + description.appendValue(trace); + } return; } description.appendValue(v); diff --git a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java index 48c9fcab3898a..3822c0d93d28d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java @@ -11,8 +11,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import 
org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; +import org.hamcrest.TypeSafeMatcher; import java.io.IOException; import java.io.InputStream; @@ -24,7 +26,9 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -395,6 +399,45 @@ public void testSubMapDescribeTo() { baz: <0>""")); } + public void testSubMatcherDescribeFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + public void describeTo(Description description) { + throw new IllegalStateException("intentional failure"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected error describing """))); + } + + public void testSubMatcherMismatchFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + protected void describeMismatchSafely(Object item, Description mismatchDescription) { + throw new IllegalStateException("intentional failure"); + } + + @Override + public void describeTo(Description description) { + description.appendValue("foo"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected "foo" but error describing """))); + } + static void assertMismatch(T v, Matcher matcher, Matcher mismatchDescriptionMatcher) { assertMap(v, not(matcher)); StringDescription description = new StringDescription(); From 5dcdc34927f0a0b87820250f2b6d7cf982dc13cf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 06:43:13 +1000 Subject: [PATCH 157/389] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {stats.ByTwoCalculatedSecondOverwrites SYNC} #112117 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index e23763ba2cdc0..cc459b7cccdf2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -176,6 +176,9 @@ tests: - class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT method: testLimitedPrivilege issue: https://github.com/elastic/elasticsearch/issues/112110 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112117 # Examples: # From 7b1d2a254341701f0a783826894089c99e0b96d3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 06:44:31 +1000 Subject: [PATCH 158/389] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} #112118 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cc459b7cccdf2..ec097616c2af6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT 
method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} issue: https://github.com/elastic/elasticsearch/issues/112117 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112118 # Examples: # From 14b7170921f2f0e4109255b83cb9af175385d87f Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Fri, 23 Aug 2024 09:13:41 +1000 Subject: [PATCH 159/389] Don't fail retention lease sync actions due to capacity constraints (#109414) Closes #105926 --- docs/changelog/109414.yaml | 6 ++ .../BackgroundRetentionLeaseSyncActionIT.java | 75 ++++++++++++++ .../seqno/RetentionLeaseSyncActionIT.java | 98 +++++++++++++++++++ .../RetentionLeaseBackgroundSyncAction.java | 2 +- .../index/seqno/RetentionLeaseSyncAction.java | 4 +- 5 files changed, 182 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/109414.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml new file mode 100644 index 0000000000000..81b7541bde35b --- /dev/null +++ b/docs/changelog/109414.yaml @@ -0,0 +1,6 @@ +pr: 109414 +summary: Don't fail retention lease sync actions due to capacity constraints +area: CRUD +type: bug +issues: + - 105926 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..0bab5be245ecf --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.stream.Stream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class BackgroundRetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() throws Exception { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + final ClusterState state = internalCluster().clusterService().state(); + final Index testIndex = resolveIndex("test"); + final ShardId testIndexShardZero = new ShardId(testIndex, 0); + final String testLeaseId = "test-lease/123"; + RetentionLeases newLeases = addTestLeaseToRetentionLeases(primary, testIndex, testLeaseId); + internalCluster().getInstance(RetentionLeaseSyncer.class, primary) + .backgroundSync( + testIndexShardZero, + state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard().allocationId().getId(), + state.term(), + newLeases + ); + + // Wait for test lease to appear on replica + IndicesService replicaIndicesService = internalCluster().getInstance(IndicesService.class, replica); + assertBusy(() -> { + RetentionLeases retentionLeases = replicaIndicesService.indexService(testIndex).getShard(0).getRetentionLeases(); + assertTrue(retentionLeases.contains(testLeaseId)); + }); + } + } + + private static RetentionLeases addTestLeaseToRetentionLeases(String primaryNodeName, Index index, String leaseId) { + IndicesService primaryIndicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName); + RetentionLeases currentLeases = primaryIndicesService.indexService(index).getShard(0).getRetentionLeases(); + RetentionLease newLease = new RetentionLease(leaseId, 0, System.currentTimeMillis(), "test source"); + return new RetentionLeases( + currentLeases.primaryTerm(), + currentLeases.version() + 1, + Stream.concat(currentLeases.leases().stream(), Stream.of(newLease)).toList() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..2d8f455792172 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + public void testActionCompletesWhenPrimaryIndexingPressureIsAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (Releasable ignored = fullyAllocatePrimaryIndexingCapacityOnNode(primary)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + private static void assertThatRetentionLeaseSyncCompletesSuccessfully(String primaryNodeName) { + RetentionLeaseSyncer instance = internalCluster().getInstance(RetentionLeaseSyncer.class, primaryNodeName); + PlainActionFuture retentionLeaseSyncResult = new PlainActionFuture<>(); + ClusterState state = internalCluster().clusterService().state(); + ShardId testIndexShardZero = new ShardId(resolveIndex("test"), 0); + ShardRouting primaryShard = state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard(); + instance.sync( + testIndexShardZero, + primaryShard.allocationId().getId(), + state.term(), + RetentionLeases.EMPTY, + retentionLeaseSyncResult + ); + safeGet(retentionLeaseSyncResult); + } + + /** + * Fully allocate primary indexing capacity on a node + * + * @param targetNode The name of the node on which to allocate + * @return A {@link Releasable} which will release the capacity when closed + */ + private static Releasable fullyAllocatePrimaryIndexingCapacityOnNode(String targetNode) { + return internalCluster().getInstance(IndexingPressure.class, targetNode) + .markPrimaryOperationStarted( + 1, + 
IndexingPressure.MAX_INDEXING_BYTES.get(internalCluster().getInstance(Settings.class, targetNode)).getBytes() + 1, + true + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index a7fa88633b806..f90d8945857b7 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -84,7 +84,7 @@ public RetentionLeaseBackgroundSyncAction( threadPool.executor(ThreadPool.Names.MANAGEMENT), SyncGlobalCheckpointAfterOperation.DoNotSync, PrimaryActionExecution.RejectOnOverload, - ReplicaActionExecution.SubjectToCircuitBreaker + ReplicaActionExecution.BypassCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index b7d632eab3bc5..67ed7c6e4c191 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -91,10 +91,10 @@ public RetentionLeaseSyncAction( RetentionLeaseSyncAction.Request::new, RetentionLeaseSyncAction.Request::new, new ManagementOnlyExecutorFunction(threadPool), - PrimaryActionExecution.RejectOnOverload, + PrimaryActionExecution.Force, indexingPressure, systemIndices, - ReplicaActionExecution.SubjectToCircuitBreaker + ReplicaActionExecution.BypassCircuitBreaker ); } From 1072f2bbab64d49244d9592239c4c28a514c2237 Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Thu, 22 Aug 2024 21:15:29 -0500 Subject: [PATCH 160/389] Add interval based SLM scheduling (#110847) Add the ability to schedule SLM policies with a time unit interval schedule rather than a cron schedule. For example, an SLM policy can be created with the argument "schedule": "30m". This creates a policy that runs 30 minutes after the policy's modification_date and then again each time another 30 minutes has passed. Every time the policy is changed, the next snapshot is re-scheduled to run one interval after the new modification date.
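As a sketch (the policy id, snapshot name, and repository below are placeholders; the request shape mirrors the `hourly-snapshots` docs example added in this change):

```
PUT /_slm/policy/every-30-minutes
{
  "schedule": "30m",
  "name": "<snap-{now/d}>",
  "repository": "my_repository",
  "config": {
    "indices": ["data-*"]
  }
}
```

If the policy's modification time is 12:00, snapshots are then scheduled at 12:30, 13:00, 13:30, and so on. Intervals below the `slm.minimum_interval` setting (15 minutes by default) are still rejected by validation.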
--- docs/changelog/110847.yaml | 5 + docs/reference/slm/apis/slm-put.asciidoc | 31 +++- .../lifecycle/DataStreamLifecycleService.java | 8 +- .../common/scheduler/SchedulerEngine.java | 60 +++---- .../health/HealthPeriodicLogger.java | 2 +- .../scheduler/SchedulerEngineTests.java | 2 +- .../license/ClusterStateLicenseService.java | 6 +- .../core/slm/SnapshotLifecyclePolicy.java | 79 ++++++-- .../core/slm/SnapshotLifecyclePolicyItem.java | 3 +- .../slm/SnapshotLifecyclePolicyMetadata.java | 5 + .../slm/SnapshotLifecyclePolicyItemTests.java | 2 +- .../SnapshotLifecyclePolicyMetadataTests.java | 41 ++++- .../IndexLifecycleInitialisationTests.java | 4 +- .../xpack/ilm/IndexLifecycleService.java | 4 +- .../xpack/rollup/job/RollupJobTask.java | 4 +- .../xpack/slm/SnapshotLifecycleRestIT.java | 41 ++++- .../xpack/slm/SnapshotLifecycle.java | 6 +- .../xpack/slm/SnapshotLifecycleFeatures.java | 6 + .../xpack/slm/SnapshotLifecycleService.java | 25 ++- .../xpack/slm/SnapshotLifecycleTask.java | 8 +- .../xpack/slm/SnapshotRetentionTask.java | 8 +- .../slm/action/ReservedSnapshotAction.java | 8 +- .../TransportPutSnapshotLifecycleAction.java | 8 +- .../slm/SnapshotLifecyclePolicyTests.java | 170 +++++++++++++++++- .../slm/SnapshotLifecycleServiceTests.java | 143 +++++++++++---- .../slm/SnapshotRetentionServiceTests.java | 5 +- ...vedSnapshotLifecycleStateServiceTests.java | 64 ++++++- .../history/SnapshotHistoryStoreTests.java | 13 +- 28 files changed, 611 insertions(+), 150 deletions(-) create mode 100644 docs/changelog/110847.yaml diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml new file mode 100644 index 0000000000000..214adc97ac7cb --- /dev/null +++ b/docs/changelog/110847.yaml @@ -0,0 +1,5 @@ +pr: 110847 +summary: SLM Interval based scheduling +area: ILM+SLM +type: feature +issues: [] diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index be265554deef5..51ad571ee12e7 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -100,13 +100,19 @@ Minimum number of snapshots to retain, even if the snapshots have expired. ==== `schedule`:: -(Required, <>) +(Required, <> or <>) Periodic or absolute schedule at which the policy creates snapshots. {slm-init} applies `schedule` changes immediately. +Schedule may be either a Cron schedule or a time unit describing the interval between snapshots. +When using a time unit interval, the first snapshot is scheduled one interval after the policy modification time, and then again every interval after. 
+ [[slm-api-put-example]] ==== {api-examples-title} + +[[slm-api-put-daily-policy]] +===== Create a policy Create a `daily-snapshots` lifecycle policy: [source,console] @@ -138,4 +144,25 @@ PUT /_slm/policy/daily-snapshots <6> Optional retention configuration <7> Keep snapshots for 30 days <8> Always keep at least 5 successful snapshots, even if they're more than 30 days old -<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old \ No newline at end of file +<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old + + +[[slm-api-put-hourly-policy]] +===== Use Interval Scheduling +Create an `hourly-snapshots` lifecycle policy using interval scheduling: + +[source,console] +-------------------------------------------------- +PUT /_slm/policy/hourly-snapshots +{ + "schedule": "1h", + "name": "", + "repository": "my_repository", + "config": { + "indices": ["data-*", "important"] + } +} +-------------------------------------------------- +// TEST[setup:setup-repository] +Creates a snapshot once every hour. The first snapshot will be created one hour after the policy is modified, +with subsequent snapshots being created every hour afterward. diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 0cb29dbcf5b2f..0b24a3c9c9101 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -296,13 +296,13 @@ public void close() { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(LIFECYCLE_JOB_NAME)) { + if (event.jobName().equals(LIFECYCLE_JOB_NAME)) { if (this.isMaster) { logger.trace( "Data stream lifecycle job triggered: {}, {}, {}", - event.getJobName(), - event.getScheduledTime(), - event.getTriggeredTime() + event.jobName(), + event.scheduledTime(), + event.triggeredTime() ); run(clusterService.state()); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { diff --git a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java index be4d7c741bc92..ab63ab4062767 100644 --- a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java +++ b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.core.Nullable; import java.time.Clock; import java.util.Collection; @@ -39,47 +40,27 @@ */ public class SchedulerEngine { - public static class Job { - private final String id; - private final Schedule schedule; - + /** + * In most cases a Job only requires a `schedule` and an `id`, but an optional `fixedStartTime` + * can also be used. This is used as a fixed `startTime` argument for all calls to + * `schedule.nextScheduledTimeAfter(startTime, now)`. Interval-based schedules use `startTime` + * as a basis time from which all run times are calculated. 
If a Job does not contain a + * `fixedStartTime`, this basis time will be the time at which the Job is added to the SchedulerEngine. + * This could change if a master change or restart causes a new SchedulerEngine to be constructed. + * But using a `fixedStartTime` populated from a time stored in cluster state allows the basis time + * to remain unchanged across master changes and restarts. + * + * @param id the id of the job + * @param schedule the schedule which is used to calculate when the job runs + * @param fixedStartTime a fixed time in the past which the schedule uses to calculate run times, + */ + public record Job(String id, Schedule schedule, @Nullable Long fixedStartTime) { public Job(String id, Schedule schedule) { - this.id = id; - this.schedule = schedule; - } - - public String getId() { - return id; - } - - public Schedule getSchedule() { - return schedule; + this(id, schedule, null); } } - public static class Event { - private final String jobName; - private final long triggeredTime; - private final long scheduledTime; - - public Event(String jobName, long triggeredTime, long scheduledTime) { - this.jobName = jobName; - this.triggeredTime = triggeredTime; - this.scheduledTime = scheduledTime; - } - - public String getJobName() { - return jobName; - } - - public long getTriggeredTime() { - return triggeredTime; - } - - public long getScheduledTime() { - return scheduledTime; - } - + public record Event(String jobName, long triggeredTime, long scheduledTime) { @Override public String toString() { return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]"; @@ -159,12 +140,13 @@ public Set scheduledJobIds() { } public void add(Job job) { - ActiveSchedule schedule = new ActiveSchedule(job.getId(), job.getSchedule(), clock.millis()); + final long startTime = job.fixedStartTime() == null ? 
clock.millis() : job.fixedStartTime(); + ActiveSchedule schedule = new ActiveSchedule(job.id(), job.schedule(), startTime); schedules.compute(schedule.name, (name, previousSchedule) -> { if (previousSchedule != null) { previousSchedule.cancel(); } - logger.debug(() -> "added job [" + job.getId() + "]"); + logger.debug(() -> "added job [" + job.id() + "]"); return schedule; }); } diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 8208e4bd70c34..97c0679bed34f 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -299,7 +299,7 @@ protected void doClose() throws IOException { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { + if (event.jobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { this.tryToLogHealth(); } } diff --git a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java index e10898da978be..8672189220a9f 100644 --- a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java +++ b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java @@ -166,7 +166,7 @@ public void testCancellingDuringRunPreventsRescheduling() throws Exception { final String jobId = randomAlphaOfLength(4); try { engine.register(event -> { - assertThat(event.getJobName(), is(jobId)); + assertThat(event.jobName(), is(jobId)); calledCount.incrementAndGet(); jobRunningLatch.countDown(); try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index b352a9abce886..f5123b9352fe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -289,11 +289,11 @@ public void triggered(SchedulerEngine.Event event) { final LicensesMetadata licensesMetadata = getLicensesMetadata(); if (licensesMetadata != null) { final License license = licensesMetadata.getLicense(); - if (event.getJobName().equals(LICENSE_JOB)) { + if (event.jobName().equals(LICENSE_JOB)) { updateXPackLicenseState(license); - } else if (event.getJobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { + } else if (event.jobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { expirationCallbacks.stream() - .filter(expirationCallback -> expirationCallback.getId().equals(event.getJobName())) + .filter(expirationCallback -> expirationCallback.getId().equals(event.jobName())) .forEach(expirationCallback -> expirationCallback.on(license)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index fb892a318f07c..23bf21004040a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.slm; -import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.cluster.SimpleDiffable; @@ -15,6 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.scheduler.SchedulerEngine; +import org.elasticsearch.common.scheduler.TimeValueSchedule; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.snapshots.SnapshotsService; @@ -24,9 +25,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.scheduler.Cron; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.Clock; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -48,6 +51,7 @@ public class SnapshotLifecyclePolicy implements SimpleDiffable configuration; private final SnapshotRetentionConfiguration retentionPolicy; + private final boolean isCronSchedule; private static final ParseField NAME = new ParseField("name"); private static final ParseField SCHEDULE = new ParseField("schedule"); @@ -92,6 +96,7 @@ public SnapshotLifecyclePolicy( this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); this.configuration = configuration; this.retentionPolicy = retentionPolicy; + this.isCronSchedule = isCronSchedule(schedule); } public SnapshotLifecyclePolicy(StreamInput in) throws IOException { @@ -101,6 +106,7 @@ public SnapshotLifecyclePolicy(StreamInput in) throws IOException { this.repository = in.readString(); this.configuration = in.readGenericMap(); this.retentionPolicy = in.readOptionalWriteable(SnapshotRetentionConfiguration::new); + this.isCronSchedule = isCronSchedule(schedule); } public String getId() { @@ -129,9 +135,43 @@ public SnapshotRetentionConfiguration getRetentionPolicy() { return this.retentionPolicy; } - public long calculateNextExecution() { - final Cron scheduleEvaluator = new Cron(this.schedule); - return scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + boolean isCronSchedule() { + return this.isCronSchedule; + } + + /** + * @return whether `schedule` is a cron expression + */ + static boolean isCronSchedule(String schedule) { + try { + new Cron(schedule); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + /** + * @return whether `schedule` is an interval time unit expression + */ + public static boolean isIntervalSchedule(String schedule) { + try { + TimeValue.parseTimeValue(schedule, "schedule"); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + public long calculateNextExecution(long modifiedDate, Clock clock) { + if (isCronSchedule()) { + final Cron scheduleEvaluator = new Cron(this.schedule); + return scheduleEvaluator.getNextValidTimeAfter(clock.millis()); + } else { + final TimeValue interval = TimeValue.parseTimeValue(this.schedule, SCHEDULE.getPreferredName()); + final TimeValueSchedule timeValueSchedule = new TimeValueSchedule(interval); + return timeValueSchedule.nextScheduledTimeAfter(modifiedDate, clock.millis()); + } } /** @@ -139,13 +179,17 @@ public long calculateNextExecution() { *

    * In ordinary cases, this can be treated as the interval between executions of the schedule (for schedules like 'twice an hour' or * 'every five minutes'). - * + * @param clock a clock to provide current time * @return a {@link TimeValue} representing the difference between the next two valid times after now, or {@link TimeValue#MINUS_ONE} * if either of the next two times after now is unsupported according to @{@link Cron#getNextValidTimeAfter(long)} */ - public TimeValue calculateNextInterval() { + public TimeValue calculateNextInterval(Clock clock) { + if (isCronSchedule() == false) { + return TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + } + final Cron scheduleEvaluator = new Cron(this.schedule); - long next1 = scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + long next1 = scheduleEvaluator.getNextValidTimeAfter(clock.millis()); long next2 = scheduleEvaluator.getNextValidTimeAfter(next1); if (next1 > 0 && next2 > 0) { return TimeValue.timeValueMillis(next2 - next1); @@ -154,6 +198,15 @@ public TimeValue calculateNextInterval() { } } + public SchedulerEngine.Job buildSchedulerJob(String jobId, long modifiedDate) { + if (isCronSchedule()) { + return new SchedulerEngine.Job(jobId, new CronSchedule(schedule)); + } else { + TimeValue timeValue = TimeValue.parseTimeValue(schedule, "schedule"); + return new SchedulerEngine.Job(jobId, new TimeValueSchedule(timeValue), modifiedDate); + } + } + public ActionRequestValidationException validate() { ActionRequestValidationException err = new ActionRequestValidationException(); @@ -182,13 +235,19 @@ public ActionRequestValidationException validate() { } // Schedule validation + // n.b. there's more validation beyond this in SnapshotLifecycleService#validateMinimumInterval if (Strings.hasText(schedule) == false) { err.addValidationError("invalid schedule [" + schedule + "]: must not be empty"); } else { try { - new Cron(schedule); - } catch (IllegalArgumentException e) { - err.addValidationError("invalid schedule: " + ExceptionsHelper.unwrapCause(e).getMessage()); + var intervalTimeValue = TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + if (intervalTimeValue.millis() == 0) { + err.addValidationError("invalid schedule [" + schedule + "]: time unit must be at least 1 millisecond"); + } + } catch (IllegalArgumentException e1) { + if (isCronSchedule(schedule) == false) { + err.addValidationError("invalid schedule [" + schedule + "]: must be a valid cron expression or time unit"); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java index 6a352461c2e1e..c3c70e595eb75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.time.Clock; import java.util.Objects; /** @@ -171,7 +172,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.timeField( SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION_MILLIS.getPreferredName(), SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION.getPreferredName(), - policy.calculateNextExecution() + policy.calculateNextExecution(modifiedDate, Clock.systemUTC()) ); if (snapshotInProgress != null) { 
builder.field(SNAPSHOT_IN_PROGRESS.getPreferredName(), snapshotInProgress); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index 0a97810fadacf..672578787762e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -181,6 +182,10 @@ public long getInvocationsSinceLastSuccess() { return invocationsSinceLastSuccess; } + public SchedulerEngine.Job buildSchedulerJob(String jobId) { + return policy.buildSchedulerJob(jobId, modifiedDate); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java index 3eeaa18f0a81e..2dd1d8d4ec13a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java @@ -67,7 +67,7 @@ protected SnapshotLifecyclePolicyItem mutateInstance(SnapshotLifecyclePolicyItem return new SnapshotLifecyclePolicyItem( instance.getPolicy(), instance.getVersion(), - randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong), + randomValueOtherThan(instance.getModifiedDate(), SnapshotLifecyclePolicyMetadataTests::randomModifiedTime), instance.getLastSuccess(), instance.getLastFailure(), instance.getSnapshotInProgress(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java index 090b4fe78253d..66e25c3b91db2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java @@ -13,6 +13,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; import java.util.HashMap; import java.util.Map; @@ -79,7 +81,7 @@ public static SnapshotLifecyclePolicyMetadata createRandomPolicyMetadata(String SnapshotLifecyclePolicyMetadata.Builder builder = SnapshotLifecyclePolicyMetadata.builder() .setPolicy(randomSnapshotLifecyclePolicy(policyId)) .setVersion(randomNonNegativeLong()) - .setModifiedDate(randomNonNegativeLong()); + .setModifiedDate(randomModifiedTime()); if (randomBoolean()) { builder.setHeaders(randomHeaders()); } @@ -102,6 +104,7 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String polic for (int i = 0; i < randomIntBetween(2, 5); i++) { 
config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } + return new SnapshotLifecyclePolicy( policyId, randomAlphaOfLength(4), @@ -122,7 +125,41 @@ public static SnapshotRetentionConfiguration randomRetention() { ); } - public static String randomSchedule() { + public static String randomCronSchedule() { return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; } + + public static String randomTimeValueString() { + // restrict to intervals greater than slm.minimum_interval value of 15 minutes + Duration minInterval = Duration.ofMinutes(15); + Map unitMinVal = Map.of( + "nanos", + minInterval.toNanos(), + "micros", + minInterval.toNanos() * 1000, + "ms", + minInterval.toMillis(), + "s", + minInterval.toSeconds(), + "m", + minInterval.toMinutes(), + "h", + minInterval.toHours(), + "d", + minInterval.toDays() + ); + var unit = randomFrom(unitMinVal.keySet()); + long minVal = Math.max(1, unitMinVal.get(unit)); + long value = randomLongBetween(minVal, 1000 * minVal); + return value + unit; + } + + public static String randomSchedule() { + return randomBoolean() ? randomCronSchedule() : randomTimeValueString(); + } + + public static long randomModifiedTime() { + // if modified time is after the current time, validation will fail + return randomLongBetween(0, Clock.systemUTC().millis()); + } } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 3530f33704beb..30d1d6f7c914b 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -496,7 +496,7 @@ public void testPollIntervalUpdate() throws Exception { assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); }); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(pollInterval)); } @@ -504,7 +504,7 @@ public void testPollIntervalUpdate() throws Exception { TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000)); updateClusterSettings(Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, newPollInterval.getStringRep())); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(newPollInterval)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index c2e2c80998992..9c978ffc25cba 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -353,8 +353,8 @@ private void cancelJob() { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(XPackField.INDEX_LIFECYCLE)) { - logger.trace("job triggered: " + event.getJobName() + ", " + event.getScheduledTime() + ", 
" + event.getTriggeredTime()); + if (event.jobName().equals(XPackField.INDEX_LIFECYCLE)) { + logger.trace("job triggered: " + event.jobName() + ", " + event.scheduledTime() + ", " + event.triggeredTime()); triggerPolicies(clusterService.state(), false); } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f4c420db47ac3..5704d7837268b 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -463,8 +463,8 @@ public synchronized void onCancelled() { public synchronized void triggered(SchedulerEngine.Event event) { // Verify this is actually the event that we care about, then trigger the indexer. // Note that the status of the indexer is checked in the indexer itself - if (event.getJobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { - logger.debug("Rollup indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); + if (event.jobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { + logger.debug("Rollup indexer [" + event.jobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); } } diff --git a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index abaf9a14aeadb..d42c8ec9655ef 100644 --- a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.io.InputStream; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -108,7 +110,8 @@ public void testFullPolicySnapshot() throws Exception { // allow arbitrarily frequent slm snapshots disableSLMMinimumIntervalValidation(); - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); // Check that the snapshot was actually taken assertBusy(() -> { @@ -176,7 +179,8 @@ public void testPolicyFailure() throws Exception { disableSLMMinimumIntervalValidation(); // Create a policy with ignore_unavailable: false and an index that doesn't exist - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoName, indexPattern, false); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoName, indexPattern, false); assertBusy(() -> { // Check that the failure is written to the cluster state @@ -300,10 +304,11 @@ public void testStartStopStatus() throws Exception { }); try { + var schedule = randomBoolean() ? "0 0/15 * * * ?" 
: "15m"; createSnapshotPolicy( policyName, "snap", - "0 0/15 * * * ?", + schedule, repoId, indexName, true, @@ -671,6 +676,36 @@ public void testSnapshotRetentionWithMissingRepo() throws Exception { }, 60, TimeUnit.SECONDS); } + @SuppressWarnings("unchecked") + public void testGetIntervalSchedule() throws Exception { + final String indexName = "index-1"; + final String policyName = "policy-1"; + final String repoId = "repo-1"; + + initializeRepo(repoId); + + var schedule = "30m"; + var now = Instant.now(); + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); + + assertBusy(() -> { + Request getReq = new Request("GET", "/_slm/policy/" + policyName); + Response policyMetadata = client().performRequest(getReq); + Map policyResponseMap; + try (InputStream is = policyMetadata.getEntity().getContent()) { + policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + Map policyMetadataMap = (Map) policyResponseMap.get(policyName); + Long nextExecutionMillis = (Long) policyMetadataMap.get("next_execution_millis"); + assertNotNull(nextExecutionMillis); + + Instant nextExecution = Instant.ofEpochMilli(nextExecutionMillis); + assertTrue(nextExecution.isAfter(now.plus(Duration.ofMinutes(29)))); + assertTrue(nextExecution.isBefore(now.plus(Duration.ofMinutes(31)))); + }); + } + public Map getLocation(String path) { try { Response executeRepsonse = client().performRequest(new Request("GET", path)); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index 0d79ecf31670c..192807d667abb 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.license.XPackLicenseState; @@ -92,6 +93,7 @@ public class SnapshotLifecycle extends Plugin implements ActionPlugin, HealthPlu private final SetOnce snapshotRetentionService = new SetOnce<>(); private final SetOnce snapshotHistoryStore = new SetOnce<>(); private final SetOnce slmHealthIndicatorService = new SetOnce<>(); + private final SetOnce featureService = new SetOnce<>(); private final Settings settings; public SnapshotLifecycle(Settings settings) { @@ -124,7 +126,7 @@ public Collection createComponents(PluginServices services) { ClusterService clusterService = services.clusterService(); ThreadPool threadPool = services.threadPool(); final List components = new ArrayList<>(); - + featureService.set(services.featureService()); SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, @@ -236,7 +238,7 @@ public List getRestHandlers( } List> reservedClusterStateHandlers() { - return List.of(new ReservedSnapshotAction()); + return List.of(new ReservedSnapshotAction(featureService.get())); } @Override diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java index f3dfe4fb26f65..96b962f70a1b6 100644 --- 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java @@ -13,8 +13,14 @@ import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; import java.util.Map; +import java.util.Set; public class SnapshotLifecycleFeatures implements FeatureSpecification { + @Override + public Set getFeatures() { + return Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE); + } + @Override public Map getHistoricalFeatures() { return Map.of(SnapshotLifecycleTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index 6d77926149334..b93f90de73f05 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -20,10 +20,11 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; -import org.elasticsearch.xpack.core.scheduler.CronSchedule; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; @@ -45,7 +46,7 @@ * task according to the policy's schedule. */ public class SnapshotLifecycleService implements Closeable, ClusterStateListener { - + public static final NodeFeature INTERVAL_SCHEDULE = new NodeFeature("slm.interval_schedule"); private static final Logger logger = LogManager.getLogger(SnapshotLifecycleService.class); private static final String JOB_PATTERN_SUFFIX = "-\\d+$"; @@ -193,15 +194,13 @@ public void maybeScheduleSnapshot(final SnapshotLifecyclePolicyMetadata snapshot // is identical to an existing job (meaning the version has not changed) then this does // not reschedule it. 
scheduledTasks.computeIfAbsent(jobId, id -> { - final SchedulerEngine.Job job = new SchedulerEngine.Job( - jobId, - new CronSchedule(snapshotLifecyclePolicy.getPolicy().getSchedule()) - ); if (existingJobsFoundAndCancelled) { logger.info("rescheduling updated snapshot lifecycle job [{}]", jobId); } else { logger.info("scheduling snapshot lifecycle job [{}]", jobId); } + + final SchedulerEngine.Job job = snapshotLifecyclePolicy.buildSchedulerJob(jobId); scheduler.add(job); return job; }); @@ -249,7 +248,7 @@ public static void validateRepositoryExists(final String repository, final Clust */ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecycle, final ClusterState state) { TimeValue minimum = LifecycleSettings.SLM_MINIMUM_INTERVAL_SETTING.get(state.metadata().settings()); - TimeValue next = lifecycle.calculateNextInterval(); + TimeValue next = lifecycle.calculateNextInterval(Clock.systemUTC()); if (next.duration() > 0 && minimum.duration() > 0 && next.millis() < minimum.millis()) { throw new IllegalArgumentException( "invalid schedule [" @@ -262,6 +261,18 @@ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecyc } } + /** + * Validate that the interval schedule feature is supported by all nodes in the cluster + * @throws IllegalArgumentException if the schedule is an interval expression but the interval schedule feature is not supported + */ + public static void validateIntervalScheduleSupport(String schedule, FeatureService featureService, ClusterState state) { + if (SnapshotLifecyclePolicy.isIntervalSchedule(schedule) && featureService.clusterHasFeature(state, INTERVAL_SCHEDULE) == false) { + throw new IllegalArgumentException( + "Unable to use slm interval schedules in mixed-clusters with nodes that do not support feature " + INTERVAL_SCHEDULE.id() + ); + } + } + @Override public void close() { if (this.running.compareAndSet(true, false)) { diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index adf011e0ade37..d49f32869f28a 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -68,21 +68,21 @@ public SnapshotLifecycleTask(final Client client, final ClusterService clusterSe @Override public void triggered(SchedulerEngine.Event event) { - logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.getJobName()); + logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.jobName()); - final Optional snapshotName = maybeTakeSnapshot(event.getJobName(), client, clusterService, historyStore); + final Optional snapshotName = maybeTakeSnapshot(event.jobName(), client, clusterService, historyStore); // Would be cleaner if we could use Optional#ifPresentOrElse snapshotName.ifPresent( name -> logger.info( "snapshot lifecycle policy job [{}] issued new snapshot creation for [{}] successfully", - event.getJobName(), + event.jobName(), name ) ); if (snapshotName.isPresent() == false) { - logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.getJobName()); + logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.jobName()); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java
b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 0cf1373e92beb..678e6941599c9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -88,21 +88,21 @@ public SnapshotRetentionTask( @Override public void triggered(SchedulerEngine.Event event) { - assert event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) - || event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) + assert event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) + || event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) : "expected id to be " + SnapshotRetentionService.SLM_RETENTION_JOB_ID + " or " + SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID + " but it was " - + event.getJobName(); + + event.jobName(); final ClusterState state = clusterService.state(); // Skip running retention if SLM is disabled, however, even if it's // disabled we allow manual running. if (SnapshotLifecycleService.slmStoppedOrStopping(state) - && event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { + && event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { logger.debug("skipping SLM retention as SLM is currently stopped or stopping"); return; } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java index f14edd89b826d..192b03aa385d5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -41,7 +42,11 @@ public class ReservedSnapshotAction implements ReservedClusterStateHandler prepare(List { private static final Logger logger = LogManager.getLogger(TransportPutSnapshotLifecycleAction.class); + private final FeatureService featureService; @Inject public TransportPutSnapshotLifecycleAction( @@ -56,7 +58,8 @@ public TransportPutSnapshotLifecycleAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + FeatureService featureService ) { super( PutSnapshotLifecycleAction.NAME, @@ -69,6 +72,7 @@ public TransportPutSnapshotLifecycleAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); + this.featureService = featureService; } @Override @@ -78,8 +82,8 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { + SnapshotLifecycleService.validateIntervalScheduleSupport(request.getLifecycle().getSchedule(), featureService, state); SnapshotLifecycleService.validateRepositoryExists(request.getLifecycle().getRepository(), state); - SnapshotLifecycleService.validateMinimumInterval(request.getLifecycle(), state); // headers from the thread 
context stored by the AuthenticationService to be shared between the diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index fc4ee7867ed04..b7674a2d60bff 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -19,11 +19,17 @@ import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomSnapshotLifecyclePolicy; +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomTimeValueString; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -34,10 +40,11 @@ public class SnapshotLifecyclePolicyTests extends AbstractXContentSerializingTes private String id; public void testToRequest() { + var schedule = randomBoolean() ? "0 1 2 3 4 ? 2099" : "30m"; SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", - "0 1 2 3 4 ? 2099", + schedule, "repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY @@ -47,13 +54,13 @@ public void testToRequest() { Collections.singletonMap("policy", "id") ); - p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); + p = new SnapshotLifecyclePolicy("id", "name", schedule, "repo", null, null); request = p.toRequest(TEST_REQUEST_TIMEOUT); expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo").uuid(request.uuid()); assertEquals(expected, request); } - public void testNextExecutionTime() { + public void testNextExecutionTimeSchedule() { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", @@ -62,10 +69,100 @@ public void testNextExecutionTime() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextExecution(), equalTo(4078864860000L)); + assertThat(p.calculateNextExecution(-1, Clock.systemUTC()), equalTo(4078864860000L)); } - public void testCalculateNextInterval() { + public void testNextExecutionTimeInterval() { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + { + // current time is exactly modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is half an interval past modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofMinutes(15)); + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + 
assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is a full day (24 intervals) ahead of modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofDays(1)); + Instant expected = Instant.parse("2024-07-18T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time before modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.minus(Duration.ofHours(1)); + expectThrows(AssertionError.class, () -> p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime))); + } + + { + // current time is every minute of a day + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expectedTime = modifiedTime.plus(Duration.ofMinutes(30)); + + for (; currentTime.isBefore(modifiedTime.plus(Duration.ofDays(1))); currentTime = currentTime.plus(Duration.ofMinutes(1))) { + if (currentTime.equals(expectedTime)) { + expectedTime = expectedTime.plus(Duration.ofMinutes(30)); + } + assertThat( + p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), + equalTo(expectedTime.toEpochMilli()) + ); + } + } + } + + private static Clock fixedClock(Instant instant) { + return Clock.fixed(instant, ZoneOffset.UTC); + } + + public void testCalculateNextIntervalInterval() { + + { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(30))); + } + { + String schedule = randomTimeValueString(); + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + schedule, + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.parseTimeValue(schedule, "schedule"))); + } + } + + public void testCalculateNextIntervalSchedule() { { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", @@ -75,7 +172,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.timeValueMinutes(5))); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(5))); } { @@ -87,7 +184,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } { @@ -99,7 +196,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } } @@ -123,7 +220,7 @@ public void testValidation() { + " the following characters " + Strings.INVALID_FILENAME_CHARS, "invalid repository name [ ]: cannot be empty", - "invalid schedule: invalid cron 
expression [* * * * * L]" + "invalid schedule [* * * * * L]: must be a valid cron expression or time unit" ) ); } @@ -149,6 +246,34 @@ public void testValidation() { ); } + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "0d", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [0d]: time unit must be at least 1 millisecond")); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "999micros", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [999micros]: time unit must be at least 1 millisecond")); + } + { SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( "my_policy", @@ -161,6 +286,33 @@ public void testValidation() { ValidationException e = policy.validate(); assertThat(e, nullValue()); } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "1ms", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } } public void testMetadataValidation() { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 5b59ac9efc0ab..36887681f5575 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -37,6 +38,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleStats; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; @@ -48,6 +50,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -108,7 +111,7 @@ public void testRepositoryExistenceForMissingRepo() { public void testNothingScheduledWhenNotRunning() throws InterruptedException { ClockMock clock = new ClockMock(); SnapshotLifecyclePolicyMetadata initialPolicy = SnapshotLifecyclePolicyMetadata.builder() - 
.setPolicy(createPolicy("initial", "*/1 * * * * ?")) + .setPolicy(createPolicy("initial", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -133,7 +136,7 @@ public void testNothingScheduledWhenNotRunning() throws InterruptedException { sls.init(); SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -211,7 +214,7 @@ public void testPolicyCRUD() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setModifiedDate(1) .build(); @@ -240,7 +243,7 @@ public void testPolicyCRUD() throws Exception { int currentCount = triggerCount.get(); previousState = state; SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -253,7 +256,7 @@ public void testPolicyCRUD() throws Exception { CopyOnWriteArrayList triggeredJobs = new CopyOnWriteArrayList<>(); trigger.set(e -> { - triggeredJobs.add(e.getJobName()); + triggeredJobs.add(e.jobName()); triggerCount.incrementAndGet(); }); clock.fastForwardSeconds(1); @@ -283,7 +286,7 @@ public void testPolicyCRUD() throws Exception { // When the service is no longer master, all jobs should be automatically cancelled policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(3) .setModifiedDate(1) @@ -343,7 +346,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-2", "30 * * * * ?")) + .setPolicy(createPolicy("foo-2", randomBoolean() ? "30 * * * * ?" : "30s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -358,7 +361,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2-1"))); SnapshotLifecyclePolicyMetadata secondPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-1", "45 * * * * ?")) + .setPolicy(createPolicy("foo-1", randomBoolean() ? "45 * * * * ?" : "45s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(1) @@ -410,33 +413,70 @@ public void testValidateMinimumInterval() { ) .build(); - for (String schedule : List.of("0 0/15 * * * ?", "0 0 1 * * ?", "0 0 0 1 1 ? 2099" /* once */, "* * * 31 FEB ? 
*" /* never */)) { - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + { // using chron schedule + for (String schedule : List.of( + "0 0/15 * * * ?", + "0 0 1 * * ?", + "0 0 0 1 1 ? 2099" /* once */, + "* * * 31 FEB ? *" /* never */ + )) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + } + + IllegalArgumentException e; + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); } - IllegalArgumentException e; + { // using time value + for (String interval : List.of("15m", "1h", "1d")) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationDisabledState); + } - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + IllegalArgumentException e; - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [1m]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), validationOneMinuteState); + + e = 
expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [30s]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationDisabledState); + } } public void testStoppedPriority() { @@ -485,6 +525,41 @@ public void submitUnbatchedStateUpdateTask(String source, ClusterStateUpdateTask } } + public void testValidateIntervalScheduleSupport() { + var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures())); + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state) + ); + assertThat(e.getMessage(), containsString("Unable to use slm interval schedules")); + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state); + } catch (Exception e) { + fail("interval schedule is supported by version and should not fail"); + } + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("*/1 * * * * ?", featureService, state); + } catch (Exception e) { + fail("cron schedule does not need feature check and should not fail"); + } + } + } + class FakeSnapshotTask extends SnapshotLifecycleTask { private final Consumer onTriggered; @@ -515,7 +590,7 @@ public ClusterState createState(SnapshotLifecycleMetadata snapMeta, boolean loca } public static SnapshotLifecyclePolicy createPolicy(String id) { - return createPolicy(id, randomSchedule()); + return createPolicy(id, SnapshotLifecyclePolicyMetadataTests.randomSchedule()); } public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { @@ -534,8 +609,4 @@ public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { SnapshotRetentionConfiguration.EMPTY ); } - - public static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; - } } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java index dbb22f8dd49d8..877aa0ddb7342 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.slm.history.SnapshotHistoryStore; @@ 
-52,7 +53,7 @@ public void testJobsAreScheduled() throws InterruptedException { assertThat(service.getScheduler().jobCount(), equalTo(0)); service.onMaster(); - service.setUpdateSchedule(SnapshotLifecycleServiceTests.randomSchedule()); + service.setUpdateSchedule(SnapshotLifecyclePolicyMetadataTests.randomCronSchedule()); assertThat(service.getScheduler().scheduledJobIds(), containsInAnyOrder(SnapshotRetentionService.SLM_RETENTION_JOB_ID)); service.offMaster(); @@ -81,7 +82,7 @@ public void testManualTriggering() throws InterruptedException { try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); SnapshotRetentionService service = new SnapshotRetentionService(Settings.EMPTY, () -> new FakeRetentionTask(event -> { - assertThat(event.getJobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); + assertThat(event.jobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); invoked.incrementAndGet(); }), clock) ) { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index 71346ebc495d4..0fcc4b8007c6d 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -79,17 +80,17 @@ private TransformState processJSON(ReservedSnapshotAction action, TransformState } public void testDependencies() { - var action = new ReservedSnapshotAction(); + var action = new ReservedSnapshotAction(mock(FeatureService.class)); assertThat(action.optionalDependencies(), contains(ReservedRepositoryAction.NAME)); } - public void testValidationFails() { + public void testValidationFailsNeitherScheduleOrInterval() { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); final ClusterName clusterName = new ClusterName("elasticsearch"); ClusterState state = ClusterState.builder(clusterName).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ @@ -117,6 +118,56 @@ public void testValidationFails() { ); } + public void testIntervalScheduleSupportValidation() { + Client client = mock(Client.class); + when(client.settings()).thenReturn(Settings.EMPTY); + final ClusterName clusterName = new ClusterName("elasticsearch"); + List repositoriesMetadata = List.of(new RepositoryMetadata("repo", "fs", Settings.EMPTY)); + Metadata.Builder mdBuilder = Metadata.builder(); + mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); + ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); + TransformState prevState = new TransformState(state, Set.of()); + String goodPolicyJSON = """ 
+ { + "daily-snapshots": { + "schedule": "30d", + "name": "", + "repository": "repo", + "config": { + "indices": ["foo-*", "important"], + "ignore_unavailable": true, + "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 1, + "max_count": 50 + } + } + } + """; + + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(false); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + assertThat( + expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, goodPolicyJSON)).getMessage(), + is("Error on validating SLM requests") + ); + } + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(true); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + try { + processJSON(action, prevState, goodPolicyJSON); + } catch (Exception e) { + fail("interval schedule with interval feature should pass validation"); + } + } + } + public void testActionAddRemove() throws Exception { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); @@ -128,7 +179,7 @@ public void testActionAddRemove() throws Exception { mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); String emptyJSON = ""; @@ -362,7 +413,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { null, List.of( new ReservedClusterSettingsAction(clusterSettings), - new ReservedSnapshotAction(), + new ReservedSnapshotAction(mock(FeatureService.class)), new ReservedRepositoryAction(repositoriesService) ) ); @@ -396,7 +447,8 @@ public void testPutSLMReservedStateHandler() throws Exception { mock(ClusterService.class), threadPool, mock(ActionFilters.class), - mock(IndexNameExpressionResolver.class) + mock(IndexNameExpressionResolver.class), + mock(FeatureService.class) ); assertThat(putAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java index 750fdd40c12d6..211afe8e55a15 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.junit.After; import org.junit.Before; @@ -194,10 +195,14 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } } - return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), randomSchedule(), randomAlphaOfLength(4), config, null); - } - private static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; + 
return new SnapshotLifecyclePolicy( + id, + randomAlphaOfLength(4), + SnapshotLifecyclePolicyMetadataTests.randomSchedule(), + randomAlphaOfLength(4), + config, + null + ); } } From e1dc59625f9081ba74c7502e4a0a0026dd3776fc Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Fri, 23 Aug 2024 02:21:11 -0500 Subject: [PATCH 161/389] SLM interval schedule followup - add back getFieldName style getters (#112123) Recent SLM interval change #110847 included changing two classes to records. This changed the getter methods from the form getFieldName() to fieldName(). Unfortunately, serverless expected the getFieldName() form. Until serverless can be updated, we'll add back the getFieldName() style getters, in addition to the fieldName() getters, so as not to break the build. --- docs/changelog/112123.yaml | 5 +++ .../common/scheduler/SchedulerEngine.java | 34 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 docs/changelog/112123.yaml diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml new file mode 100644 index 0000000000000..0c0d7ac44cd17 --- /dev/null +++ b/docs/changelog/112123.yaml @@ -0,0 +1,5 @@ +pr: 112123 +summary: SLM interval schedule followup - add back `getFieldName` style getters +area: ILM+SLM +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java index ab63ab4062767..66b4f3c82e3cf 100644 --- a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java +++ b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java @@ -58,6 +58,23 @@ public record Job(String id, Schedule schedule, @Nullable Long fixedStartTime) { public Job(String id, Schedule schedule) { this(id, schedule, null); } + + /** + * The following getters are redundant with the getters built in by the record. + * Unfortunately, getFieldName form getters are expected by serverless. + * These getters are being added back until serverless can be updated for the new getters. + */ + public String getId() { + return id; + } + + public Schedule getSchedule() { + return schedule; + } + + public Long getFixedStartTime() { + return fixedStartTime; + } } public record Event(String jobName, long triggeredTime, long scheduledTime) { @@ -65,6 +82,23 @@ public record Event(String jobName, long triggeredTime, long scheduledTime) { public String toString() { return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]"; } + + /** + * The following getters are redundant with the getters built in by the record. + * Unfortunately, getFieldName form getters are expected by serverless. + * These getters are being added back until serverless can be updated for the new getters. + */ + public String getJobName() { + return jobName; + } + + public long getTriggeredTime() { + return triggeredTime; + } + + public long getScheduledTime() { + return scheduledTime; + } } public interface Listener { From e0c1ccbc1e2d843f86324c4b888c77b37ce7f800 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:26:55 +0200 Subject: [PATCH 162/389] Make enrich cache based on memory usage (#111412) The max enrich cache size setting now also supports an absolute max size in bytes (of used heap space) and a percentage of the max heap space, in addition to the existing flat document count. The default is 1% of the max heap space.
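For illustration, all three forms are handled by the FlatNumberOrByteSizeValue helper introduced later in this commit. The following is a minimal hedged sketch (not part of the change itself; the wrapper class name is illustrative) that exercises only the parse method and accessors shown in the diff below:

[source,java]
--------------------------------------------------
import org.elasticsearch.xpack.enrich.EnrichPlugin.FlatNumberOrByteSizeValue;

class EnrichCacheSizeSketch {
    static void examples() {
        // "1000" ends in a digit, so it parses as a flat (unit-less) entry count:
        // flatNumber() == 1000L and byteSizeValue() == null.
        FlatNumberOrByteSizeValue flat = FlatNumberOrByteSizeValue.parse("1000", "enrich.cache.size", null);

        // "100mb" ends in a unit, so it parses as an absolute byte size:
        // byteSizeValue() is 100mb and flatNumber() == null.
        FlatNumberOrByteSizeValue bytes = FlatNumberOrByteSizeValue.parse("100mb", "enrich.cache.size", null);

        // "1%" is resolved against the configured max heap size,
        // matching the new default of 1% of the heap.
        FlatNumberOrByteSizeValue heapPct = FlatNumberOrByteSizeValue.parse("1%", "enrich.cache.size", null);
    }
}
--------------------------------------------------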
This should prevent issues where the enrich cache takes up a lot of memory when there are large documents in the cache. --- docs/changelog/111412.yaml | 6 ++ docs/reference/ingest/enrich.asciidoc | 20 ++++-- .../xpack/enrich/EnrichCache.java | 23 ++++++- .../xpack/enrich/EnrichPlugin.java | 65 ++++++++++++++++++- .../FlatNumberOrByteSizeValueTests.java | 59 +++++++++++++++++ 5 files changed, 162 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/111412.yaml create mode 100644 x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml new file mode 100644 index 0000000000000..297fa77cd2664 --- /dev/null +++ b/docs/changelog/111412.yaml @@ -0,0 +1,6 @@ +pr: 111412 +summary: Make enrich cache based on memory usage +area: Ingest Node +type: enhancement +issues: + - 106081 diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc index 6642cdc2a74ce..4bd50641149c0 100644 --- a/docs/reference/ingest/enrich.asciidoc +++ b/docs/reference/ingest/enrich.asciidoc @@ -230,12 +230,12 @@ Instead, you can: [[ingest-enrich-components]] ==== Enrich components -The enrich coordinator is a component that manages and performs the searches +The enrich coordinator is a component that manages and performs the searches required to enrich documents on each ingest node. It combines searches from all enrich processors in all pipelines into bulk <>. -The enrich policy executor is a component that manages the executions of all -enrich policies. When an enrich policy is executed, this component creates +The enrich policy executor is a component that manages the executions of all +enrich policies. When an enrich policy is executed, this component creates a new enrich index and removes the previous enrich index. The enrich policy executions are managed from the elected master node. The execution of these policies occurs on a different node. @@ -249,9 +249,15 @@ enrich policy executor. The enrich coordinator supports the following node settings: `enrich.cache_size`:: -Maximum number of searches to cache for enriching documents. Defaults to `1000`. -There is a single cache for all enrich processors in the cluster. This setting -determines the size of that cache. +Maximum size of the cache that caches searches for enriching documents. +The size can be specified in three units: the raw number of +cached searches (e.g. `1000`), an absolute size in bytes (e.g. `100Mb`), +or a percentage of the max heap space of the node (e.g. `1%`). +Both for the absolute byte size and the percentage of heap space, +{es} does not guarantee that the enrich cache size will adhere exactly to that maximum, +as {es} uses the byte size of the serialized search response +which is a good representation of the used space on the heap, but not an exact match. +Defaults to `1%`. There is a single cache for all enrich processors in the cluster. `enrich.coordinator_proxy.max_concurrent_requests`:: Maximum number of concurrent <> to
include::geo-match-enrich-policy-type-ex.asciidoc[] include::match-enrich-policy-type-ex.asciidoc[] -include::range-enrich-policy-type-ex.asciidoc[] \ No newline at end of file +include::range-enrich-policy-type-ex.asciidoc[] diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index 35c2071188864..0130bd5537a11 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -29,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import java.util.function.ToLongBiFunction; /** * A simple cache for enrich that uses {@link Cache}. There is one instance of this cache and @@ -61,12 +63,29 @@ public final class EnrichCache { this(maxSize, System::nanoTime); } + EnrichCache(ByteSizeValue maxByteSize) { + this(maxByteSize, System::nanoTime); + } + // non-private for unit testing only EnrichCache(long maxSize, LongSupplier relativeNanoTimeProvider) { this.relativeNanoTimeProvider = relativeNanoTimeProvider; - this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).removalListener(notification -> { + this.cache = createCache(maxSize, null); + } + + EnrichCache(ByteSizeValue maxByteSize, LongSupplier relativeNanoTimeProvider) { + this.relativeNanoTimeProvider = relativeNanoTimeProvider; + this.cache = createCache(maxByteSize.getBytes(), (key, value) -> value.sizeInBytes); + } + + private Cache createCache(long maxWeight, ToLongBiFunction weigher) { + var builder = CacheBuilder.builder().setMaximumWeight(maxWeight).removalListener(notification -> { sizeInBytes.getAndAdd(-1 * notification.getValue().sizeInBytes); - }).build(); + }); + if (weigher != null) { + builder.weigher(weigher); + } + return builder.build(); } /** diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 868ec49ff1d97..1a68ada60b6f1 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -12,17 +12,22 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.features.NodeFeature; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SystemIndexPlugin; @@ -121,14 +126,29 @@ public class EnrichPlugin extends Plugin implements SystemIndexPlugin, IngestPlu return String.valueOf(maxConcurrentRequests * maxLookupsPerRequest); }, val -> Setting.parseInt(val, 1, Integer.MAX_VALUE, QUEUE_CAPACITY_SETTING_NAME), Setting.Property.NodeScope); - public static final Setting CACHE_SIZE = Setting.longSetting("enrich.cache_size", 1000, 0, Setting.Property.NodeScope); + public static final String CACHE_SIZE_SETTING_NAME = "enrich.cache.size"; + public static final Setting CACHE_SIZE = new Setting<>( + "enrich.cache.size", + (String) null, + (String s) -> FlatNumberOrByteSizeValue.parse( + s, + CACHE_SIZE_SETTING_NAME, + new FlatNumberOrByteSizeValue(ByteSizeValue.ofBytes((long) (0.01 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize()))) + ), + Setting.Property.NodeScope + ); private final Settings settings; private final EnrichCache enrichCache; public EnrichPlugin(final Settings settings) { this.settings = settings; - this.enrichCache = new EnrichCache(CACHE_SIZE.get(settings)); + FlatNumberOrByteSizeValue maxSize = CACHE_SIZE.get(settings); + if (maxSize.byteSizeValue() != null) { + this.enrichCache = new EnrichCache(maxSize.byteSizeValue()); + } else { + this.enrichCache = new EnrichCache(maxSize.flatNumber()); + } } @Override @@ -265,4 +285,45 @@ public String getFeatureName() { public String getFeatureDescription() { return "Manages data related to Enrich policies"; } + + /** + * A class that specifies either a flat (unit-less) number or a byte size value. + */ + public static class FlatNumberOrByteSizeValue { + + @Nullable + private final Long flatNumber; + @Nullable + private final ByteSizeValue byteSizeValue; + + public FlatNumberOrByteSizeValue(ByteSizeValue byteSizeValue) { + this.byteSizeValue = byteSizeValue; + this.flatNumber = null; + } + + public FlatNumberOrByteSizeValue(Long flatNumber) { + this.flatNumber = flatNumber; + this.byteSizeValue = null; + } + + public static FlatNumberOrByteSizeValue parse(String value, String settingName, FlatNumberOrByteSizeValue defaultValue) { + if (Strings.hasText(value) == false) { + return defaultValue; + } + if (Character.isDigit(value.charAt(value.length() - 1)) == false) { + return new FlatNumberOrByteSizeValue(MemorySizeValue.parseBytesSizeValueOrHeapRatio(value, settingName)); + } + return new FlatNumberOrByteSizeValue(Long.parseLong(value)); + } + + @Nullable + public ByteSizeValue byteSizeValue() { + return byteSizeValue; + } + + @Nullable + public Long flatNumber() { + return flatNumber; + } + } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java new file mode 100644 index 0000000000000..809b78c50b35a --- /dev/null +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.enrich; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.enrich.EnrichPlugin.FlatNumberOrByteSizeValue; + +public class FlatNumberOrByteSizeValueTests extends ESTestCase { + + private static final String SETTING_NAME = "test.setting"; + + public void testParse() { + int number = randomIntBetween(1, Integer.MAX_VALUE); + assertEquals( + new FlatNumberOrByteSizeValue((long) number), + FlatNumberOrByteSizeValue.parse(Integer.toString(number), SETTING_NAME, null) + ); + assertEquals( + new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)), + FlatNumberOrByteSizeValue.parse(number + "GB", SETTING_NAME, null) + ); + assertEquals( + new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)), + FlatNumberOrByteSizeValue.parse(number + "g", SETTING_NAME, null) + ); + int percentage = randomIntBetween(0, 100); + assertEquals( + new FlatNumberOrByteSizeValue( + ByteSizeValue.ofBytes((long) ((double) percentage / 100 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize())) + ), + FlatNumberOrByteSizeValue.parse(percentage + "%", SETTING_NAME, null) + ); + assertEquals(new FlatNumberOrByteSizeValue(0L), FlatNumberOrByteSizeValue.parse("0", SETTING_NAME, null)); + assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0GB", SETTING_NAME, null)); + assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0%", SETTING_NAME, null)); + // Assert default value. + assertEquals( + new FlatNumberOrByteSizeValue((long) number), + FlatNumberOrByteSizeValue.parse(null, SETTING_NAME, new FlatNumberOrByteSizeValue((long) number)) + ); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GB%", SETTING_NAME, null)); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5%GB", SETTING_NAME, null)); + assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GBX", SETTING_NAME, null)); + } + + private void assertEquals(FlatNumberOrByteSizeValue expected, FlatNumberOrByteSizeValue actual) { + assertEquals(expected.byteSizeValue(), actual.byteSizeValue()); + assertEquals(expected.flatNumber(), actual.flatNumber()); + } +} From e46b5173a951ce6b33b93288e35e3ad50da9929b Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 23 Aug 2024 14:59:38 +0700 Subject: [PATCH 163/389] Minor cleanup of code in the org.elasticsearch.index.codec package. (#112125) * Removing unnecessary field * making inner class static * use enhanced switch statement * removed commented out code.
* made immutable fields final --- .../index/codec/PerFieldFormatSupplier.java | 4 --- .../codec/postings/ES812PostingsReader.java | 34 +++++++------------ .../index/codec/postings/ES812SkipReader.java | 4 +-- .../index/codec/postings/ES812SkipWriter.java | 8 ++--- .../codec/tsdb/ES87TSDBDocValuesProducer.java | 26 +++++--------- 5 files changed, 28 insertions(+), 48 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java index 1228c908f7c18..685e9774b04a7 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java @@ -24,8 +24,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; -import java.util.Objects; - /** * Class that encapsulates the logic of figuring out the most appropriate file format for a given field, across postings, doc values and * vectors. @@ -33,7 +31,6 @@ public class PerFieldFormatSupplier { private final MapperService mapperService; - private final BigArrays bigArrays; private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat(); private final KnnVectorsFormat knnVectorsFormat = new Lucene99HnswVectorsFormat(); private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; @@ -43,7 +40,6 @@ public class PerFieldFormatSupplier { public PerFieldFormatSupplier(MapperService mapperService, BigArrays bigArrays) { this.mapperService = mapperService; - this.bigArrays = Objects.requireNonNull(bigArrays); this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); this.es812PostingsFormat = new ES812PostingsFormat(); diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java index 1aada2a153c3c..3aaf2ee5a8c4b 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -874,10 +874,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - freq; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; @@ -1010,7 +1006,7 @@ final class BlockImpactsDocsEnum extends ImpactsEnum { final boolean indexHasFreqs; - private int docFreq; // number of docs in this posting list + private final int docFreq; // number of docs in this posting list private int blockUpto; // number of documents in or before the current block private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1211,8 +1207,8 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int 
docUpto; // how many docs we've read private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1228,19 +1224,19 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { private long posPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1507,8 +1503,8 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int docUpto; // how many docs we've read private int posDocUpTo; // for how many docs we've read positions, offsets, and payloads private int doc; // doc we last read @@ -1528,19 +1524,19 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { private long payPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1835,10 +1831,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - (int) freqBuffer[docBufferUpto - 1]; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java index f9b36114361ca..8dd99392625fd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java @@ -54,8 +54,8 @@ *

    Therefore, we'll trim df before passing it to the interface. see trim(int) */ class ES812SkipReader extends MultiLevelSkipListReader { - private long[] docPointer; - private long[] posPointer; + private final long[] docPointer; + private final long[] posPointer; private long[] payPointer; private int[] posBufferUpto; private int[] payloadByteUpto; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java index dbfb7c86a1475..98c516fc890e8 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java @@ -51,8 +51,8 @@ * uptos(position, payload). 4. start offset. */ final class ES812SkipWriter extends MultiLevelSkipListWriter { - private int[] lastSkipDoc; - private long[] lastSkipDocPointer; + private final int[] lastSkipDoc; + private final long[] lastSkipDocPointer; private long[] lastSkipPosPointer; private long[] lastSkipPayPointer; @@ -66,7 +66,7 @@ final class ES812SkipWriter extends MultiLevelSkipListWriter { private long curPayPointer; private int curPosBufferUpto; private int curPayloadByteUpto; - private CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; + private final CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; private boolean fieldHasPositions; private boolean fieldHasOffsets; private boolean fieldHasPayloads; @@ -197,7 +197,7 @@ protected void writeSkipData(int level, DataOutput skipBuffer) throws IOExceptio } CompetitiveImpactAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level]; - assert competitiveFreqNorms.getCompetitiveFreqNormPairs().size() > 0; + assert competitiveFreqNorms.getCompetitiveFreqNormPairs().isEmpty() == false; if (level + 1 < numberOfSkipLevels) { curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms); } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index fb90327770674..b6e1bb503045c 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -342,14 +342,10 @@ public BytesRef lookupOrd(int ord) throws IOException { @Override public int lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return Math.toIntExact(termsEnum.ord()); - case NOT_FOUND: - case END: - default: - return Math.toIntExact(-1L - termsEnum.ord()); - } + return switch (status) { + case FOUND -> Math.toIntExact(termsEnum.ord()); + default -> Math.toIntExact(-1L - termsEnum.ord()); + }; } @Override @@ -384,14 +380,10 @@ public BytesRef lookupOrd(long ord) throws IOException { @Override public long lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return termsEnum.ord(); - case NOT_FOUND: - case END: - default: - return -1L - termsEnum.ord(); - } + return switch (status) { + case FOUND -> termsEnum.ord(); + default -> -1L - termsEnum.ord(); + }; } @Override @@ -400,7 +392,7 @@ public TermsEnum termsEnum() throws IOException { } } - private class TermsDict extends BaseTermsEnum { + private static class TermsDict extends BaseTermsEnum { static final int LZ4_DECOMPRESSOR_PADDING 
= 7; final TermsDictEntry entry; From 8325a7196bd4375a6a46ff57c7d4cd7af09074e4 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 23 Aug 2024 11:11:47 +0300 Subject: [PATCH 164/389] Use StandardAnalyzer in MapperServiceTestCase (#112127) We currently use Lucene's `MockAnalyzer`, which rarely injects random payloads into text fields. This leads to assertion errors for synthetic source, where the round-trip source (after printing and parsing the synthetic source) appears the same, but the FieldInfo for text mappers now differs due to the injected payload. Fixes #112083 --- .../org/elasticsearch/index/mapper/MapperServiceTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 272901eb19351..7c11e7446e5c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -788,7 +788,8 @@ protected TriFunction, MappedFieldType.F } protected RandomIndexWriter indexWriterForSyntheticSource(Directory directory) throws IOException { - return new RandomIndexWriter(random(), directory); + // MockAnalyzer (rarely) produces random payloads that lead to failures during assertReaderEquals. + return new RandomIndexWriter(random(), directory, new StandardAnalyzer()); } protected final String syntheticSource(DocumentMapper mapper, CheckedConsumer build) throws IOException { From 92d25c157a6e74a5b3b5a40928957bc311884040 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Fri, 23 Aug 2024 12:46:52 +0400 Subject: [PATCH 165/389] Fix id and routing types in indices.split YAML tests (#112059) --- .../40_routing_partition_size.yml | 72 +++++++++---------- .../indices.split/50_routing_required.yml | 48 ++++++------- 2 files changed, 60 insertions(+), 60 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml index 80a8ccf0d1063..11ffbe1d8464d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml @@ -16,22 +16,22 @@ more than 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -66,8 +66,8 @@ more than 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -76,8 +76,8 @@ more than 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -86,8 +86,8 @@ more than 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -117,22 +117,22 @@ exactly 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2
- routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -167,8 +167,8 @@ exactly 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -177,8 +177,8 @@ exactly 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -187,8 +187,8 @@ exactly 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -221,22 +221,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -271,8 +271,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -281,8 +281,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -291,8 +291,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml index 38bf9d72ef8ff..4c8d7736631c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml @@ -15,22 +15,22 @@ routing required: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -65,8 +65,8 @@ routing required: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -75,8 +75,8 @@ routing required: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -85,8 +85,8 @@ routing required: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -122,22 +122,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -172,8 +172,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -182,8 +182,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ 
-192,8 +192,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } From 34a78f3cf3e91cd13f51f1f4f8e378f8ed244a2b Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Fri, 23 Aug 2024 11:49:15 +0300 Subject: [PATCH 166/389] Add documentation to deprecate the global retention privileges. (#112020) --- docs/reference/security/authorization/privileges.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index f15654bef2d1f..747b1eef40441 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -101,6 +101,9 @@ deprecated[7.5] Use `manage_transform` instead. + This privilege is not available in {serverless-full}. +`manage_data_stream_global_retention`:: +This privilege has no effect.deprecated[8.16] + `manage_enrich`:: All operations related to managing and executing enrich policies. @@ -223,6 +226,9 @@ security roles of the user who created or updated them. All cluster read-only operations, like cluster health and state, hot threads, node info, node and cluster stats, and pending cluster tasks. +`monitor_data_stream_global_retention`:: +This privilege has no effect.deprecated[8.16] + `monitor_enrich`:: All read-only operations related to managing and executing enrich policies. From d6d305805608e203d5b1d1a1308454cecb4ac2ac Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 23 Aug 2024 17:17:05 +0700 Subject: [PATCH 167/389] Fix synthetic source NestedObjectMapper assertion. (#112131) The parentDoc parameter can be -1, and the assertion needs to take this into account (just like the next line already does). Closes #111998 --- .../java/org/elasticsearch/index/mapper/NestedObjectMapper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index d866b3c78173b..f61f91250516a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -441,7 +441,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf } private List collectChildren(int parentDoc, BitSet parentDocs, DocIdSetIterator childIt) throws IOException { - assert parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; + assert parentDoc < 0 || parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; final int prevParentDoc = parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1; int childDocId = childIt.docID(); if (childDocId <= prevParentDoc) { From 2b1170509b39b5a23c00bbfa9da87144868a5df2 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 23 Aug 2024 17:17:54 +0700 Subject: [PATCH 168/389] Change subobjects yaml tests to use composable index templates. (#112129) Currently these tests use the legacy templates, which are deprecated.
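For illustration only (not part of this commit's diff), the shape of the change in each test body is roughly the following; the legacy API keeps `mappings` at the top level of the body, while the composable API nests it under `template`:

  # before: legacy template (deprecated)
  indices.put_template:
    name: test
    body:
      index_patterns: test-*
      mappings:
        properties:
          host.name:
            type: keyword

  # after: composable index template
  indices.put_index_template:
    name: test
    body:
      index_patterns: test-*
      template:
        mappings:
          properties:
            host.name:
              type: keyword

The diff below applies this same transformation to every template in the two YAML suites.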
--- .../test/index/91_metrics_no_subobjects.yml | 80 ++++++++++--------- .../test/index/92_metrics_auto_subobjects.yml | 80 ++++++++++--------- 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index 94c19a4d69e17..ca6d65349c923 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -6,20 +6,21 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -70,15 +71,16 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -129,22 +131,23 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -196,17 +199,18 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index 984c1c22b2177..e4fee3569fef2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -6,20 +6,21 @@ reason: requires supporting subobjects auto setting - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -70,15 +71,16 @@ reason: requires supporting subobjects auto setting - do: - indices.put_template: + indices.put_index_template: name: 
test body: index_patterns: test-* - mappings: - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -129,22 +131,23 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -196,17 +199,18 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: From db0cc8122922fe7930199da889ea53fd72b30220 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Fri, 23 Aug 2024 14:02:14 +0200 Subject: [PATCH 169/389] Add support for spatial relationships in point field mapper (#112126) Lucene only supports intersects queries over XYPoint fields but it is still possible to represent all the spatial relationships using just that query. --- docs/changelog/112126.yaml | 5 + .../search/ShapeQueryOverPointTests.java | 128 +------ .../spatial/search/ShapeQueryTestCase.java | 311 ++++++++++-------- .../index/mapper/PointFieldMapper.java | 3 +- .../index/query/ShapeQueryPointProcessor.java | 278 ++++++++++++++-- .../ShapeQueryBuilderOverPointTests.java | 15 +- 6 files changed, 432 insertions(+), 308 deletions(-) create mode 100644 docs/changelog/112126.yaml diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml new file mode 100644 index 0000000000000..f6a7aeb893a5e --- /dev/null +++ b/docs/changelog/112126.yaml @@ -0,0 +1,5 @@ +pr: 112126 +summary: Add support for spatial relationships in point field mapper +area: Geo +type: enhancement +issues: [] diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java index 0563c8f281cb8..f4ee7f264d4f7 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java @@ -6,141 +6,23 @@ */ package org.elasticsearch.xpack.spatial.search; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.geometry.GeometryCollection; -import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; -import org.elasticsearch.geometry.MultiLine; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import 
org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; -import org.hamcrest.CoreMatchers; - -import java.util.List; public class ShapeQueryOverPointTests extends ShapeQueryTestCase { @Override protected XContentBuilder createDefaultMapping() throws Exception { - XContentBuilder xcb = XContentFactory.jsonBuilder() + final boolean isIndexed = randomBoolean(); + final boolean hasDocValues = isIndexed == false || randomBoolean(); + return XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject(defaultFieldName) .field("type", "point") + .field("index", isIndexed) + .field("doc_values", hasDocValues) .endObject() .endObject() .endObject(); - - return xcb; - } - - public void testProcessRelationSupport() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Rectangle rectangle = new Rectangle(-35, -25, -25, -35); - - for (ShapeRelation shapeRelation : ShapeRelation.values()) { - if (shapeRelation.equals(ShapeRelation.INTERSECTS) == false) { - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("test") - .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(shapeRelation)) - .get() - ); - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString(shapeRelation + " query relation not supported for Field [" + defaultFieldName + "]") - ); - } - } - } - - public void testQueryLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line line = new Line(new double[] { -25, -25 }, new double[] { -35, -35 }); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, line)).get(); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.LINESTRING + " queries")); - } - } - - public void testQueryLinearRing() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - LinearRing linearRing = new LinearRing(new double[] { -25, -35, -25 }, new double[] { -25, -35, -25 }); - - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, linearRing) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - - ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - } - - public void testQueryMultiLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line lsb1 = new Line(new double[] { -35, -25 }, new double[] { -35, -25 }); - Line lsb2 = new Line(new double[] { -15, -5 }, new double[] { -15, -5 }); - - MultiLine multiline = new MultiLine(List.of(lsb1, lsb2)); - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiline)).get(); - } catch (Exception e) { - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString("does not support " + 
ShapeType.MULTILINESTRING + " queries") - ); - } - } - - public void testQueryMultiPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - MultiPoint multiPoint = new MultiPoint(List.of(new Point(-35, -25), new Point(-15, -5))); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.MULTIPOINT + " queries")); - } } - - public void testQueryPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Point point = new Point(-35, -2); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, point)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.POINT + " queries")); - } - } - } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java index 38d0a30b593b6..1ac6bf3b6fd31 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java @@ -7,16 +7,18 @@ package org.elasticsearch.xpack.spatial.search; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; import org.elasticsearch.geometry.MultiPolygon; import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -26,6 +28,7 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; +import org.hamcrest.CoreMatchers; import java.util.Collection; import java.util.List; @@ -35,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -46,29 +50,18 @@ protected Collection> getPlugins() { return pluginList(LocalStateSpatialPlugin.class, LocalStateCompositeXPackPlugin.class); } - protected abstract XContentBuilder createDefaultMapping() throws Exception; - - 
static String defaultFieldName = "xy"; - static String defaultIndexName = "test-points"; + @Override + public void setUp() throws Exception { + super.setUp(); - public void testNullShape() throws Exception { String mapping = Strings.toString(createDefaultMapping()); indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); prepareIndex(defaultIndexName).setId("aNullshape") - .setSource("{\"geo\": null}", XContentType.JSON) + .setSource("{\"" + defaultFieldName + "\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); - GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); - assertThat(result.getField("location"), nullValue()); - }; - - public void testIndexPointsFilterRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) @@ -78,74 +71,82 @@ public void testIndexPointsFilterRectangle() throws Exception { .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + prepareIndex(defaultIndexName).setId("3") + .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(50 50)").endObject()) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("4") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 4") + .field(defaultFieldName, new String[] { "POINT(-30 -30)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("5") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 5") + .field(defaultFieldName, new String[] { "POINT(60 60)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + } + + protected abstract XContentBuilder createDefaultMapping() throws Exception; + + static String defaultFieldName = "xy"; + static String defaultIndexName = "test-points"; + public void testNullShape() { + GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); + assertThat(result.getField(defaultFieldName), nullValue()); + }; + + public void testIndexPointsFilterRectangle() { Rectangle rectangle = new Rectangle(-45, 45, 45, -45); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); // default query, without specifying relation (expect intersects) - assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName).setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - 
assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsCircle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsCircle() { Circle circle = new Circle(-30, -30, 1); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsPolygon() { Polygon polygon = new Polygon(new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 })); assertNoFailuresAndResponse( @@ -153,32 +154,14 @@ public void testIndexPointsPolygon() throws Exception { .setQuery(new ShapeQueryBuilder(defaultFieldName, polygon).relation(ShapeRelation.INTERSECTS)), response -> { SearchHits searchHits = response.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getId(), equalTo("1")); + assertThat(searchHits.getTotalHits().value, equalTo(2L)); + assertThat(searchHits.getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(searchHits.getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsMultiPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, 
"POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-40 -40)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("3") - .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(-50 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsMultiPolygon() { Polygon encloseDocument1Shape = new Polygon( new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 }) ); @@ -192,29 +175,16 @@ public void testIndexPointsMultiPolygon() throws Exception { client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, mp).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getHits().length, equalTo(2)); - assertThat(response.getHits().getAt(0).getId(), not(equalTo("2"))); - assertThat(response.getHits().getAt(1).getId(), not(equalTo("2"))); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(0).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(1).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(2).getId(), not(equalTo("3"))); } ); } - public void testIndexPointsRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsRectangle() { Rectangle rectangle = new Rectangle(-50, -40, -45, -55); assertNoFailuresAndResponse( @@ -229,20 +199,6 @@ public void testIndexPointsRectangle() throws Exception { } public void testIndexPointsIndexedRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("point1") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("point2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - String indexedShapeIndex = "indexed_query_shapes"; String indexedShapePath = "shape"; String queryShapesMapping = Strings.toString( @@ -278,7 +234,7 @@ public void testIndexPointsIndexedRectangle() throws Exception { response -> { assertThat(response.getHits().getTotalHits().value, equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("point2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); } ); @@ -291,53 +247,122 @@ public void testIndexPointsIndexedRectangle() 
throws Exception { ), 0L ); + } - public void testDistanceQuery() throws Exception { - indicesAdmin().prepareCreate("test_distance").setMapping("location", "type=shape").get(); - ensureGreen(); + public void testDistanceQuery() { + Circle circle = new Circle(-25, -25, 10); - Circle circle = new Circle(1, 0, 10); - - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(2, 2))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(3, 1))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(-20, -30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(20, 30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), + 2L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.WITHIN)), + 1L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.DISJOINT)), + 3L + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.WITHIN)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.CONTAINS)), + 0L + ); + } + + public void testIndexPointsQueryLinearRing() { + LinearRing linearRing = new LinearRing(new double[] { -50, -50 }, new double[] { 50, 50 }); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, linearRing) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + + ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + } + + public void testIndexPointsQueryLine() { + Line line = new Line(new double[] { 100, -30 }, new double[] { -100, -30 }); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, line).relation(ShapeRelation.INTERSECTS)), 2L ); + } + + public void testIndexPointsQueryMultiLine() { + MultiLine multiLine = new MultiLine( + List.of( + new Line(new double[] { 100, -30 }, new double[] { -100, -30 }), + new Line(new double[] { 100, -20 }, new double[] { -100, -20 }) + ) + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.INTERSECTS)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiLine).relation(ShapeRelation.INTERSECTS)), 2L ); + } + public void testIndexPointsQueryPoint() { + Point point = new Point(-30, -30); assertHitCount( - 
client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.DISJOINT)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.INTERSECTS)), 2L ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.WITHIN)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.CONTAINS)), + 2L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.DISJOINT)), + 3L + ); + } + public void testIndexPointsQueryMultiPoint() { + MultiPoint multiPoint = new MultiPoint(List.of(new Point(-30, -30), new Point(50, 50))); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.CONTAINS)), - 0L + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.INTERSECTS)), + 4L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.WITHIN)), + 3L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.CONTAINS)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.DISJOINT)), + 1L ); } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index d98fe7fdfc6ec..9412dc3c5eb53 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -215,7 +215,8 @@ public String typeName() { @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - return queryProcessor.shapeQuery(shape, fieldName, relation, context); + failIfNotIndexedNorDocValuesFallback(context); + return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java index a8c084e7e0f01..22616eabf8211 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java @@ -8,50 +8,272 @@ import org.apache.lucene.document.XYDocValuesField; import org.apache.lucene.document.XYPointField; +import org.apache.lucene.geo.Component2D; import org.apache.lucene.geo.XYGeometry; +import org.apache.lucene.geo.XYPoint; +import org.apache.lucene.geo.XYRectangle; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import 
org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.ShapeType; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; -import java.util.function.Consumer; +import java.util.Arrays; +/** Generates a lucene query for a spatial query over a point field. + * + * Note that lucene only supports intersects spatial relation so we build other relations + * using just that one. + * */ public class ShapeQueryPointProcessor { - public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - final boolean hasDocValues = validateIsPointFieldType(fieldName, context); - // only the intersects relation is supported for indexed cartesian point types - if (relation != ShapeRelation.INTERSECTS) { - throw new QueryShardException(context, relation + " query relation not supported for Field [" + fieldName + "]."); - } - final Consumer checker = t -> { - if (t == ShapeType.POINT || t == ShapeType.MULTIPOINT || t == ShapeType.LINESTRING || t == ShapeType.MULTILINESTRING) { - throw new QueryShardException(context, "Field [" + fieldName + "] does not support " + t + " queries"); - } + public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean isIndexed, boolean hasDocValues) { + assert isIndexed || hasDocValues; + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + // XYPointField only supports intersects query so we build all the relationships using that logic. + // it is not very efficient but it works. + return switch (relation) { + case INTERSECTS -> buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case DISJOINT -> buildDisjointQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case CONTAINS -> buildContainsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case WITHIN -> buildWithinQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); }; - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, checker); - Query query = XYPointField.newGeometryQuery(fieldName, luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); + } + + private static Query buildIntersectsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... 
luceneGeometries) { + // This is supported natively in lucene + Query query; + if (isIndexed) { + query = XYPointField.newGeometryQuery(fieldName, luceneGeometries); + if (hasDocValues) { + final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); + query = new IndexOrDocValuesQuery(query, queryDocValues); + } + } else { + query = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); } return query; } - private boolean validateIsPointFieldType(String fieldName, SearchExecutionContext context) { - MappedFieldType fieldType = context.getFieldType(fieldName); - if (fieldType instanceof PointFieldMapper.PointFieldType == false) { - throw new QueryShardException( - context, - "Expected " + PointFieldMapper.CONTENT_TYPE + " field type for Field [" + fieldName + "] but found " + fieldType.typeName() + private static Query buildDisjointQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + // first collect all the documents that contain a shape + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + if (hasDocValues) { + builder.add(new FieldExistsQuery(fieldName), BooleanClause.Occur.FILTER); + } else { + builder.add( + buildIntersectsQuery( + fieldName, + isIndexed, + hasDocValues, + new XYRectangle(-Float.MAX_VALUE, Float.MAX_VALUE, -Float.MAX_VALUE, Float.MAX_VALUE) + ), + BooleanClause.Occur.FILTER ); } - return fieldType.hasDocValues(); + // then remove all intersecting documents + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.MUST_NOT); + return builder.build(); + } + + private static Query buildContainsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + // for non-point data the result is always false + if (allPoints(luceneGeometries) == false) { + return new MatchNoDocsQuery(); + } + // for a unique point, it behaves like intersects + if (luceneGeometries.length == 1) { + return buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + } + // for a multi point, all points need to be in the document + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (XYGeometry geometry : luceneGeometries) { + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, geometry), BooleanClause.Occur.FILTER); + } + return builder.build(); + } + + private static Query buildWithinQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) { + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + // collect all the intersecting documents + builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.FILTER); + // This is the tricky part as we need to remove all documents that have at least one disjoint point. + // In order to do that, we introduce an InverseXYGeometry which returns all documents that have at least one disjoint point + // with the original geometry. + builder.add( + buildIntersectsQuery(fieldName, isIndexed, hasDocValues, new InverseXYGeometry(luceneGeometries)), + BooleanClause.Occur.MUST_NOT + ); + return builder.build(); + } + + private static boolean allPoints(XYGeometry[] geometries) { + return Arrays.stream(geometries).allMatch(g -> g instanceof XYPoint); + } + + private static class InverseXYGeometry extends XYGeometry { + private final XYGeometry[] geometries; + + InverseXYGeometry(XYGeometry...
geometries) { + this.geometries = geometries; + } + + @Override + protected Component2D toComponent2D() { + final Component2D component2D = XYGeometry.create(geometries); + return new Component2D() { + @Override + public double getMinX() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxX() { + return Float.MAX_VALUE; + } + + @Override + public double getMinY() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxY() { + return Float.MAX_VALUE; + } + + @Override + public boolean contains(double x, double y) { + return component2D.contains(x, y) == false; + } + + @Override + public PointValues.Relation relate(double minX, double maxX, double minY, double maxY) { + PointValues.Relation relation = component2D.relate(minX, maxX, minY, maxY); + return switch (relation) { + case CELL_INSIDE_QUERY -> PointValues.Relation.CELL_OUTSIDE_QUERY; + case CELL_OUTSIDE_QUERY -> PointValues.Relation.CELL_INSIDE_QUERY; + case CELL_CROSSES_QUERY -> PointValues.Relation.CELL_CROSSES_QUERY; + }; + } + + @Override + public boolean intersectsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean intersectsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinPoint(double x, double y) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY, + boolean bc, + double cX, + double cY, + boolean ca + ) { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InverseXYGeometry that = (InverseXYGeometry) o; + return Arrays.equals(geometries, that.geometries); + } + + @Override + public int hashCode() { + return Arrays.hashCode(geometries); + } } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java index db67b1f1e998b..05756168991c9 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java @@ -30,22 +30,11 @@ protected void initializeAdditionalMappings(MapperService mapperService) 
throws @Override protected ShapeRelation getShapeRelation(ShapeType type) { - return ShapeRelation.INTERSECTS; + return randomFrom(ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS, ShapeRelation.DISJOINT, ShapeRelation.WITHIN); } @Override protected Geometry getGeometry() { - if (randomBoolean()) { - if (randomBoolean()) { - return ShapeTestUtils.randomMultiPolygon(false); - } else { - return ShapeTestUtils.randomPolygon(false); - } - } else if (randomBoolean()) { - // it should be a circle - return ShapeTestUtils.randomPolygon(false); - } else { - return ShapeTestUtils.randomRectangle(); - } + return ShapeTestUtils.randomGeometry(false); } } From 3ab163450916be7f39158691517a788fdf7cc391 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 22:32:20 +1000 Subject: [PATCH 170/389] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT org.elasticsearch.test.rest.ClientYamlTestSuiteIT #112143 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index ec097616c2af6..20cf821f68c5e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,6 +182,8 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} issue: https://github.com/elastic/elasticsearch/issues/112118 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112143 # Examples: # From 915528c00e9f5de9f74c86c2e775b96c60eced7f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:08:09 +1000 Subject: [PATCH 171/389] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=transform/preview_transforms/Test preview transform latest} #112144 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 20cf821f68c5e..f57c3dbcc2a6d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -184,6 +184,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112118 - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/112143 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/preview_transforms/Test preview transform latest} + issue: https://github.com/elastic/elasticsearch/issues/112144 # Examples: # From bcad4f0d24772a34b25b77b3abc58687c5a1df69 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:59:44 +1000 Subject: [PATCH 172/389] Mute org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT #112147 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f57c3dbcc2a6d..463075f1f93ae 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -187,6 +187,8 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/preview_transforms/Test preview transform latest} issue: https://github.com/elastic/elasticsearch/issues/112144 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112147 # Examples: # From 9847a315fce870ff9288c7bfe86a00ba0f40013b Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Fri, 23 Aug 
2024 10:41:03 -0400 Subject: [PATCH 173/389] Semantic reranking should fail whenever inference ID does not exist (#112038) * Semantic reranking should fail whenever inference ID does not exist * Short circuit text similarity reranking on empty result set * Update tests * Remove test - it doesn't do anything useful * Update docs/changelog/112038.yaml --- docs/changelog/112038.yaml | 6 ++ ...ankFeaturePhaseRankCoordinatorContext.java | 16 ++--- ...ankFeaturePhaseRankCoordinatorContext.java | 19 ++++-- ...aturePhaseRankCoordinatorContextTests.java | 19 ++++++ .../70_text_similarity_rank_retriever.yml | 67 +++++++++++++++++-- 5 files changed, 104 insertions(+), 23 deletions(-) create mode 100644 docs/changelog/112038.yaml diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml new file mode 100644 index 0000000000000..6cbfb373b7420 --- /dev/null +++ b/docs/changelog/112038.yaml @@ -0,0 +1,6 @@ +pr: 112038 +summary: Semantic reranking should fail whenever inference ID does not exist +area: Relevance +type: bug +issues: + - 111934 diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java index 02834f03f54ab..9faa5e4e4450c 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java @@ -74,16 +74,12 @@ public void computeRankScoresForGlobalResults( RankFeatureDoc[] featureDocs = extractFeatureDocs(rankSearchResults); // generate the final `topResults` results, and pass them to fetch phase through the `rankListener` - if (featureDocs.length == 0) { - rankListener.onResponse(new RankFeatureDoc[0]); - } else { - computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { - for (int i = 0; i < featureDocs.length; i++) { - featureDocs[i].score = scores[i]; - } - listener.onResponse(featureDocs); - })); - } + computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { + for (int i = 0; i < featureDocs.length; i++) { + featureDocs[i].score = scores[i]; + } + listener.onResponse(featureDocs); + })); } /** diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java index 42413c35fcbff..cad11cbdc9d5b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java @@ -62,6 +62,7 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + if (rankedDocs.size() != featureDocs.length) { l.onFailure( new IllegalStateException( @@ -104,12 +105,18 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); - InferenceAction.Request inferenceRequest = generateRequest(featureData); - try { - client.execute(InferenceAction.INSTANCE, inferenceRequest, 
inferenceListener); - } finally { - inferenceRequest.decRef(); + + // Short circuit on empty results after request validation + if (featureDocs.length == 0) { + inferenceListener.onResponse(new InferenceAction.Response(new RankedDocsResults(List.of()))); + } else { + List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); + InferenceAction.Request inferenceRequest = generateRequest(featureData); + try { + client.execute(InferenceAction.INSTANCE, inferenceRequest, inferenceListener); + } finally { + inferenceRequest.decRef(); + } } }); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java index 2e9be42b5c5d4..d6c476cdc15d6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java @@ -61,4 +61,23 @@ public void onFailure(Exception e) { ); } + public void testComputeScoresForEmpty() { + subject.computeScores(new RankFeatureDoc[0], new ActionListener<>() { + @Override + public void onResponse(float[] floats) { + assertArrayEquals(new float[0], floats, 0.0f); + } + + @Override + public void onFailure(Exception e) { + fail(); + } + }); + verify(mockClient).execute( + eq(GetInferenceModelAction.INSTANCE), + argThat(actionRequest -> ((GetInferenceModelAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), + any() + ); + } + } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index 6d3c1231440fb..530be2341c9c8 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -38,8 +38,8 @@ setup: id: doc_1 body: text: "As seen from Earth, a solar eclipse happens when the Moon is directly between the Earth and the Sun." - topic: ["science"] - subtopic: ["technology"] + topic: [ "science" ] + subtopic: [ "technology" ] refresh: true - do: @@ -48,8 +48,8 @@ setup: id: doc_2 body: text: "The phases of the Moon come from the position of the Moon relative to the Earth and Sun." - topic: ["science"] - subtopic: ["astronomy"] + topic: [ "science" ] + subtopic: [ "astronomy" ] refresh: true - do: @@ -58,7 +58,7 @@ setup: id: doc_3 body: text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." 
- topic: ["geography"] + topic: [ "geography" ] refresh: true --- "Simple text similarity rank retriever": @@ -82,7 +82,7 @@ setup: field: text size: 10 - - match: { hits.total.value : 2 } + - match: { hits.total.value: 2 } - length: { hits.hits: 2 } - match: { hits.hits.0._id: "doc_2" } @@ -118,9 +118,62 @@ setup: field: text size: 10 - - match: { hits.total.value : 1 } + - match: { hits.total.value: 1 } - length: { hits.hits: 1 } - match: { hits.hits.0._id: "doc_1" } - match: { hits.hits.0._rank: 1 } - close_to: { hits.hits.0._score: { value: 0.2, error: 0.001 } } + + +--- +"Text similarity reranking fails if the inference ID does not exist": + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "science" + filter: + term: + subtopic: "technology" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "How often does the moon hide the sun?" + field: text + size: 10 + +--- +"Text similarity reranking fails if the inference ID does not exist and result set is empty": + - requires: + cluster_features: "gte_v8.15.1" + reason: bug fixed in 8.15.1 + + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "asdfasdf" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "asdfasdf" + field: text + size: 10 + From 3c92797d0d323de25116c4c0a6d3758f5b1b37ac Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:53:07 +0300 Subject: [PATCH 174/389] Allow warnings for template conflicts (#112145) Fixes #112143 --- .../test/index/91_metrics_no_subobjects.yml | 16 ++++++++++++---- .../test/index/92_metrics_auto_subobjects.yml | 16 ++++++++++++---- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index ca6d65349c923..5881ec83ebe85 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -1,11 +1,13 @@ --- "Metrics object indexing": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -66,11 +68,13 @@ --- "Root without subobjects": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ 
-126,11 +130,13 @@ --- "Metrics object indexing with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -194,11 +200,13 @@ --- "Root without subobjects with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index e4fee3569fef2..414c24cfffd7d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -1,11 +1,13 @@ --- "Metrics object indexing": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -66,11 +68,13 @@ --- "Root with metrics": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -126,11 +130,13 @@ --- "Metrics object indexing with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -194,11 +200,13 @@ --- "Root without subobjects with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has 
index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: From d71654195c38fb8cc0806c2a27689d59f8ffd1c6 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:23:57 +0100 Subject: [PATCH 175/389] [DOCS] Wrap document/field restriction tip in IMPORTANT block (#112146) --- .../authorization/field-and-document-access-control.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/security/authorization/field-and-document-access-control.asciidoc b/docs/reference/security/authorization/field-and-document-access-control.asciidoc index f4d4fcd49a35f..7c7ea75ece161 100644 --- a/docs/reference/security/authorization/field-and-document-access-control.asciidoc +++ b/docs/reference/security/authorization/field-and-document-access-control.asciidoc @@ -54,8 +54,11 @@ specify any field restrictions. If you assign a user both roles, `role_a` gives the user access to all documents and `role_b` gives the user access to all fields. +[IMPORTANT] +=========== If you need to restrict access to both documents and fields, consider splitting documents by index instead. +=========== include::role-templates.asciidoc[] include::set-security-user.asciidoc[] From 1fb2afa3df8501044a0383d53fb2e712f579109f Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:26:57 +0300 Subject: [PATCH 176/389] Re-enable yaml tests (#112157) Related to #112143 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 463075f1f93ae..a46456a6c9ad9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,8 +182,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} issue: https://github.com/elastic/elasticsearch/issues/112118 -- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/112143 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/preview_transforms/Test preview transform latest} issue: https://github.com/elastic/elasticsearch/issues/112144 From 0aa4758f02f736176a0f1c211ca701465ad05f63 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 23 Aug 2024 11:16:18 -0700 Subject: [PATCH 177/389] Stop setting java.library.path (#112119) Native libraries in Java are loaded by calling System.loadLibrary. This method inspects paths in java.library.path to find the requested library. Elasticsearch previously used this to find libsystemd, but now the only remaining use is to set the additional platform directory in which Elasticsearch keeps its own native libraries. One issue with setting java.library.path is that it's not set for the CLI process, which makes loading the native library infrastructure from CLIs difficult. This commit reworks how Elasticsearch native libraries are found in order to avoid needing to set java.library.path. There are two cases. The simplest is production, where the working directory is the Elasticsearch installation directory, so the platform-specific directory can be constructed. The second case is for tests where we don't have an installation.
We already pass in java.library.path there, so this change renames the system property to be a test specific property that the new loading infrastructure looks for. --- benchmarks/build.gradle | 2 +- .../src/main/groovy/elasticsearch.ide.gradle | 3 +- .../internal/ElasticsearchJavaBasePlugin.java | 4 +- .../gradle/internal/test/TestUtil.java | 4 +- .../server/cli/SystemJvmOptions.java | 41 ------------ .../server/cli/JvmOptionsParserTests.java | 48 +------------- .../nativeaccess/lib/LoaderHelper.java | 62 +++++++++++++++++++ .../nativeaccess/jdk/JdkVectorLibrary.java | 3 +- .../nativeaccess/jdk/JdkZstdLibrary.java | 3 +- .../VectorSystemPropertyTests.java | 2 +- 10 files changed, 72 insertions(+), 100 deletions(-) create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 3f7ee8b60b53c..e2511438e7f95 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -77,7 +77,7 @@ tasks.named("run").configure { executable = "${BuildParams.runtimeJavaHome}/bin/java" args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index" dependsOn "copyExpression", "copyPainless" - systemProperty 'java.library.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}") + systemProperty 'es.nativelibs.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}") } String platformName() { diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 6cb22dad9bc79..285c3a61b08c2 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -168,8 +168,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { '-ea', '-Djava.security.manager=allow', '-Djava.locale.providers=SPI,COMPAT', - '-Djava.library.path=' + testLibraryPath, - '-Djna.library.path=' + testLibraryPath, + '-Des.nativelibs.path=' + testLibraryPath, // TODO: only open these for mockito when it is modularized '--add-opens=java.base/java.security.cert=ALL-UNNAMED', '--add-opens=java.base/java.nio.channels=ALL-UNNAMED', diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index f95d9d72a473f..a3b1dd9731591 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -189,9 +189,7 @@ private static void configureNativeLibraryPath(Project project) { var libraryPath = (Supplier) () -> TestUtil.getTestLibraryPath(nativeConfigFiles.getAsPath()); test.dependsOn(nativeConfigFiles); - // we may use JNA or the JDK's foreign function api to load libraries, so we set both sysprops - systemProperties.systemProperty("java.library.path", libraryPath); - systemProperties.systemProperty("jna.library.path", libraryPath); + systemProperties.systemProperty("es.nativelibs.path", libraryPath); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java index 96fde95d0dd17..965f3964c9a38 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java @@ -11,7 +11,6 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.ElasticsearchDistribution; -import java.io.File; import java.util.Locale; public class TestUtil { @@ -19,8 +18,7 @@ public class TestUtil { public static String getTestLibraryPath(String nativeLibsDir) { String arch = Architecture.current().toString().toLowerCase(Locale.ROOT); String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch); - String existingLibraryPath = System.getProperty("java.library.path"); - return String.format(Locale.ROOT, "%s/%s%c%s", nativeLibsDir, platform, File.pathSeparatorChar, existingLibraryPath); + return String.format(Locale.ROOT, "%s/%s", nativeLibsDir, platform); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 2a89f18209d11..94e2d538c0ad0 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -10,11 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.SuppressForbidden; -import java.io.File; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -25,7 +21,6 @@ final class SystemJvmOptions { static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { String distroType = sysprops.get("es.distribution.type"); boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); - String libraryPath = findLibraryPath(sysprops); return Stream.concat( Stream.of( @@ -73,8 +68,6 @@ static List systemJvmOptions(Settings nodeSettings, final Map TEST_SYSPROPS = Map.of( - "os.name", - "Linux", - "os.arch", - "aarch64", - "java.library.path", - "/usr/lib" - ); + private static final Map TEST_SYSPROPS = Map.of("os.name", "Linux", "os.arch", "aarch64"); public void testSubstitution() { final List jvmOptions = JvmOptionsParser.substitutePlaceholders( @@ -390,40 +380,4 @@ public void testCommandLineDistributionType() { final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro")); } - - public void testLibraryPath() { - assertLibraryPath("Mac OS", "aarch64", "darwin-aarch64"); - assertLibraryPath("Mac OS", "amd64", "darwin-x64"); - assertLibraryPath("Mac OS", "x86_64", "darwin-x64"); - assertLibraryPath("Linux", "aarch64", "linux-aarch64"); - assertLibraryPath("Linux", "amd64", "linux-x64"); - assertLibraryPath("Linux", "x86_64", "linux-x64"); - assertLibraryPath("Windows", "amd64", "windows-x64"); - assertLibraryPath("Windows", "x86_64", "windows-x64"); - assertLibraryPath("Unknown", "aarch64", "unsupported_os[Unknown]-aarch64"); - assertLibraryPath("Mac OS", "Unknown", "darwin-unsupported_arch[Unknown]"); - } - - private void assertLibraryPath(String os, String arch, String expected) { - String existingPath = "/usr/lib"; - var sysprops = Map.of("os.name", os, "os.arch", arch, "java.library.path", existingPath); - final List 
jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); - Map options = new HashMap<>(); - for (var jvmOption : jvmOptions) { - if (jvmOption.startsWith("-D")) { - String[] parts = jvmOption.substring(2).split("="); - assert parts.length == 2; - options.put(parts[0], parts[1]); - } - } - String separator = FileSystems.getDefault().getSeparator(); - assertThat( - options, - hasEntry(equalTo("java.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - assertThat( - options, - hasEntry(equalTo("jna.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java new file mode 100644 index 0000000000000..4da52c415c040 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * A utility for loading libraries from Elasticsearch's platform specific lib dir. + */ +public class LoaderHelper { + private static final Path platformLibDir = findPlatformLibDir(); + + private static Path findPlatformLibDir() { + // tests don't have an ES install, so the platform dir must be passed in explicitly + String path = System.getProperty("es.nativelibs.path"); + if (path != null) { + return Paths.get(path); + } + + Path platformDir = Paths.get("lib", "platform"); + + String osname = System.getProperty("os.name"); + String os; + if (osname.startsWith("Windows")) { + os = "windows"; + } else if (osname.startsWith("Linux")) { + os = "linux"; + } else if (osname.startsWith("Mac OS")) { + os = "darwin"; + } else { + os = "unsupported_os[" + osname + "]"; + } + String archname = System.getProperty("os.arch"); + String arch; + if (archname.equals("amd64") || archname.equals("x86_64")) { + arch = "x64"; + } else if (archname.equals("aarch64")) { + arch = archname; + } else { + arch = "unsupported_arch[" + archname + "]"; + } + return platformDir.resolve(os + "-" + arch); + } + + public static void loadLibrary(String libname) { + Path libpath = platformLibDir.resolve(System.mapLibraryName(libname)); + if (Files.exists(libpath) == false) { + throw new UnsatisfiedLinkError("Native library [" + libpath + "] does not exist"); + } + System.load(libpath.toAbsolutePath().toString()); + } + + private LoaderHelper() {} // no construction +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index c92ad654c9b9a..a1032f1381d94 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; +import 
org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import java.lang.foreign.FunctionDescriptor; @@ -29,7 +30,7 @@ public final class JdkVectorLibrary implements VectorLibrary { static final VectorSimilarityFunctions INSTANCE; static { - System.loadLibrary("vec"); + LoaderHelper.loadLibrary("vec"); final MethodHandle vecCaps$mh = downcallHandle("vec_caps", FunctionDescriptor.of(JAVA_INT)); try { diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java index e3e972bc19d72..284ac134d2036 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.CloseableByteBuffer; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.lang.foreign.FunctionDescriptor; @@ -24,7 +25,7 @@ class JdkZstdLibrary implements ZstdLibrary { static { - System.loadLibrary("zstd"); + LoaderHelper.loadLibrary("zstd"); } private static final MethodHandle compressBound$mh = downcallHandle("ZSTD_compressBound", FunctionDescriptor.of(JAVA_LONG, JAVA_INT)); diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java index 9875878d8658a..cda4fc8c55444 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java @@ -49,7 +49,7 @@ public void testSystemPropertyDisabled() throws Exception { "-Xms4m", "-cp", jarPath + File.pathSeparator + System.getProperty("java.class.path"), - "-Djava.library.path=" + System.getProperty("java.library.path"), + "-Des.nativelibs.path=" + System.getProperty("es.nativelibs.path"), "p.Test" ).start(); String output = new String(process.getInputStream().readAllBytes(), UTF_8); From 75b0c5d13b93cc2092896e75c52af20e5bed3928 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Fri, 23 Aug 2024 15:00:05 -0400 Subject: [PATCH 178/389] Ignore under construction data types for params checking (#112163) This should prevent tests failing when we've added a type to the params annotation but the data type is disabled via feature flag. --- .../function/AbstractFunctionTestCase.java | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index efb078cbe80e0..c79f5ab8d086b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -680,6 +680,10 @@ public void testSerializationOfSimple() { assertSerialization(buildFieldExpression(testCase)); } + /** + * This test is meant to validate that the params annotations for the function being tested align with the supported types the + * test framework has detected. 
+ */ @AfterClass public static void testFunctionInfo() { Logger log = LogManager.getLogger(getTestClass()); @@ -717,14 +721,23 @@ public static void testFunctionInfo() { for (int i = 0; i < args.size(); i++) { EsqlFunctionRegistry.ArgSignature arg = args.get(i); - Set annotationTypes = Arrays.stream(arg.type()).collect(Collectors.toCollection(TreeSet::new)); - Set signatureTypes = typesFromSignature.get(i); + Set annotationTypes = Arrays.stream(arg.type()) + .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .collect(Collectors.toCollection(TreeSet::new)); + Set signatureTypes = typesFromSignature.get(i) + .stream() + .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .collect(Collectors.toCollection(TreeSet::new)); if (signatureTypes.isEmpty()) { log.info("{}: skipping", arg.name()); continue; } log.info("{}: tested {} vs annotated {}", arg.name(), signatureTypes, annotationTypes); - assertEquals(signatureTypes, annotationTypes); + assertEquals( + "Missmatch between actual and declared parameter types. You probably need to update your @params annotations.", + signatureTypes, + annotationTypes + ); } Set returnTypes = Arrays.stream(description.returnType()).collect(Collectors.toCollection(TreeSet::new)); From 0f6529dec5ff3d3f4ca7d0aaef68e2675dd506b2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 23 Aug 2024 20:06:27 +0100 Subject: [PATCH 179/389] Improve threading when restoring snapshot (#112162) We use the `SNAPSHOT_META` pool for some of the work needed to start a snapshot restore, but it's a little tangled with other work happening on a `transport_worker`, or some other random threadpool on which `getRepositoryData` completes. This commit ensures that we use `SNAPSHOT_META` throughout. Relates #101445 --- .../TransportRestoreSnapshotAction.java | 3 +- .../snapshots/RestoreService.java | 54 +++++++++++-------- .../snapshots/RestoreServiceTests.java | 15 +++++- .../ccr/action/TransportPutFollowAction.java | 10 ++-- 4 files changed, 53 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index d7a14362026ef..ba34b8cab1021 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.tasks.Task; @@ -49,7 +48,7 @@ public TransportRestoreSnapshotAction( RestoreSnapshotRequest::new, indexNameExpressionResolver, RestoreSnapshotResponse::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE + threadPool.executor(ThreadPool.Names.SNAPSHOT_META) ); this.restoreService = restoreService; } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 25796606f2b1b..0f03cfab4ad2e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -56,7 +56,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; @@ -95,6 +94,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -188,6 +188,8 @@ public final class RestoreService implements ClusterStateApplier { private final ThreadPool threadPool; + private final Executor snapshotMetaExecutor; + private volatile boolean refreshRepositoryUuidOnRestore; public RestoreService( @@ -216,6 +218,7 @@ public RestoreService( this.indicesService = indicesService; this.fileSettingsService = fileSettingsService; this.threadPool = threadPool; + this.snapshotMetaExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT_META); this.refreshRepositoryUuidOnRestore = REFRESH_REPO_UUID_ON_RESTORE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(REFRESH_REPO_UUID_ON_RESTORE_SETTING, this::setRefreshRepositoryUuidOnRestore); @@ -244,24 +247,28 @@ public void restoreSnapshot( final ActionListener listener, final BiConsumer updater ) { + assert Repository.assertSnapshotMetaThread(); try { // Try and fill in any missing repository UUIDs in case they're needed during the restore final var repositoryUuidRefreshStep = new ListenableFuture(); - refreshRepositoryUuids(refreshRepositoryUuidOnRestore, repositoriesService, () -> repositoryUuidRefreshStep.onResponse(null)); + refreshRepositoryUuids( + refreshRepositoryUuidOnRestore, + repositoriesService, + () -> repositoryUuidRefreshStep.onResponse(null), + snapshotMetaExecutor + ); // Read snapshot info and metadata from the repository final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - repository.getRepositoryData( - EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading here, do we need to fork, see #101445? - repositoryDataListener - ); + repository.getRepositoryData(snapshotMetaExecutor, repositoryDataListener); repositoryDataListener.addListener( listener.delegateFailureAndWrap( (delegate, repositoryData) -> repositoryUuidRefreshStep.addListener( delegate.delegateFailureAndWrap((subDelegate, ignored) -> { + assert Repository.assertSnapshotMetaThread(); final String snapshotName = request.snapshot(); final Optional matchingSnapshotId = repositoryData.getSnapshotIds() .stream() @@ -511,12 +518,18 @@ private void setRefreshRepositoryUuidOnRestore(boolean refreshRepositoryUuidOnRe * Best-effort attempt to make sure that we know all the repository UUIDs. Calls {@link Repository#getRepositoryData} on every * {@link BlobStoreRepository} with a missing UUID. * - * @param enabled If {@code false} this method completes the listener immediately + * @param enabled If {@code false} this method completes the listener immediately * @param repositoriesService Supplies the repositories to check - * @param onCompletion Action that is executed when all repositories have been refreshed. 
+ * @param onCompletion Action that is executed when all repositories have been refreshed. + * @param responseExecutor Executor on which to execute {@code onCompletion} if not using the calling thread. */ // Exposed for tests - static void refreshRepositoryUuids(boolean enabled, RepositoriesService repositoriesService, Runnable onCompletion) { + static void refreshRepositoryUuids( + boolean enabled, + RepositoriesService repositoriesService, + Runnable onCompletion, + Executor responseExecutor + ) { try (var refs = new RefCountingRunnable(onCompletion)) { if (enabled == false) { logger.debug("repository UUID refresh is disabled"); @@ -530,20 +543,17 @@ static void refreshRepositoryUuids(boolean enabled, RepositoriesService reposito if (repository instanceof BlobStoreRepository && repository.getMetadata().uuid().equals(RepositoryData.MISSING_UUID)) { final var repositoryName = repository.getMetadata().name(); logger.info("refreshing repository UUID for repository [{}]", repositoryName); - repository.getRepositoryData( - EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading here, do we need to fork, see #101445? - ActionListener.releaseAfter(new ActionListener<>() { - @Override - public void onResponse(RepositoryData repositoryData) { - logger.debug(() -> format("repository UUID [%s] refresh completed", repositoryName)); - } + repository.getRepositoryData(responseExecutor, ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + logger.debug(() -> format("repository UUID [%s] refresh completed", repositoryName)); + } - @Override - public void onFailure(Exception e) { - logger.debug(() -> format("repository UUID [%s] refresh failed", repositoryName), e); - } - }, refs.acquire()) - ); + @Override + public void onFailure(Exception e) { + logger.debug(() -> format("repository UUID [%s] refresh failed", repositoryName), e); + } + }, refs.acquire())); } } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index 0d0293b962609..726d8fce4fc44 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoriesService; @@ -159,7 +160,12 @@ public void testPrefixNotChanged() { public void testRefreshRepositoryUuidsDoesNothingIfDisabled() { final RepositoriesService repositoriesService = mock(RepositoriesService.class); final AtomicBoolean called = new AtomicBoolean(); - RestoreService.refreshRepositoryUuids(false, repositoriesService, () -> assertTrue(called.compareAndSet(false, true))); + RestoreService.refreshRepositoryUuids( + false, + repositoriesService, + () -> assertTrue(called.compareAndSet(false, true)), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); assertTrue(called.get()); verifyNoMoreInteractions(repositoriesService); } @@ -209,7 +215,12 @@ public void testRefreshRepositoryUuidsRefreshesAsNeeded() { final RepositoriesService repositoriesService = mock(RepositoriesService.class); when(repositoriesService.getRepositories()).thenReturn(repositories); final 
AtomicBoolean completed = new AtomicBoolean(); - RestoreService.refreshRepositoryUuids(true, repositoriesService, () -> assertTrue(completed.compareAndSet(false, true))); + RestoreService.refreshRepositoryUuids( + true, + repositoriesService, + () -> assertTrue(completed.compareAndSet(false, true)), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); assertTrue(completed.get()); assertThat(pendingRefreshes, empty()); finalAssertions.forEach(Runnable::run); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index f31916cc7cf82..d8e634a297bfa 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.ActionFilters; @@ -206,15 +207,17 @@ private void createFollowerIndex( ActionListener delegatelistener = listener.delegateFailure( (delegatedListener, response) -> afterRestoreStarted(clientWithHeaders, request, delegatedListener, response) ); + + final BiConsumer updater; if (remoteDataStream == null) { // If the index we're following is not part of a data stream, start the // restoration of the index normally. - restoreService.restoreSnapshot(restoreRequest, delegatelistener); + updater = (clusterState, mdBuilder) -> {}; } else { String followerIndexName = request.getFollowerIndex(); // This method is used to update the metadata in the same cluster state // update as the snapshot is restored. - BiConsumer updater = (currentState, mdBuilder) -> { + updater = (currentState, mdBuilder) -> { final String localDataStreamName; // If we have been given a data stream name, use that name for the local @@ -239,8 +242,9 @@ private void createFollowerIndex( ); mdBuilder.put(updatedDataStream); }; - restoreService.restoreSnapshot(restoreRequest, delegatelistener, updater); } + threadPool.executor(ThreadPool.Names.SNAPSHOT_META) + .execute(ActionRunnable.wrap(delegatelistener, l -> restoreService.restoreSnapshot(restoreRequest, l, updater))); } private void afterRestoreStarted( From 8c85d442b15170aa645e4aa38a5c14130d0df1ce Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 23 Aug 2024 15:23:25 -0400 Subject: [PATCH 180/389] ESQL: Lock some data types in profile test (#112165) The test for the output from `profile` can sometimes return `long` and sometimes return `int`. That's fine, really. It just makes testing annoying. This promotes the types to always be a `long` in the test. 
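To make the promotion concrete, here is a minimal, self-contained sketch of the idiom the patch applies in `fixTypesOnProfile`: JSON numbers may deserialize as `Integer` or `Long` depending on their magnitude, and both are `java.lang.Number`, so widening through `Number` yields a `Long` either way. The class and key names below are illustrative only, not part of the patch.

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch: normalize numeric map entries that may arrive as Integer or Long.
public class PromoteToLong {
    static void promote(Map<String, Object> profile, String... keys) {
        for (String key : keys) {
            // Integer and Long both extend Number; longValue() widens safely.
            profile.put(key, ((Number) profile.get(key)).longValue());
        }
    }

    public static void main(String[] args) {
        Map<String, Object> profile = new LinkedHashMap<>();
        profile.put("iterations", 7);              // boxed as Integer
        profile.put("cpu_nanos", 7_000_000_000L);  // boxed as Long
        promote(profile, "iterations", "cpu_nanos");
        System.out.println(profile.get("iterations").getClass()); // class java.lang.Long
        System.out.println(profile.get("cpu_nanos").getClass());  // class java.lang.Long
    }
}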
Closes #112049 Closes #112039 --- .../xpack/esql/qa/single_node/RestEsqlIT.java | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index b0fa233965da6..44550c62bd7c5 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -294,6 +294,7 @@ public void testProfile() throws IOException { @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") @@ -353,6 +354,7 @@ public void testInlineStatsProfile() throws IOException { @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") @@ -457,6 +459,7 @@ public void testForceSleepsProfile() throws IOException { List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertMap(p, commonProfile()); @SuppressWarnings("unchecked") Map sleeps = (Map) p.get("sleeps"); @@ -497,13 +500,24 @@ public void testForceSleepsProfile() throws IOException { private MapMatcher commonProfile() { return matchesMap().entry("start_millis", greaterThan(0L)) .entry("stop_millis", greaterThan(0L)) - .entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) + .entry("iterations", greaterThan(0L)) + .entry("cpu_nanos", greaterThan(0L)) + .entry("took_nanos", greaterThan(0L)) .entry("operators", instanceOf(List.class)) .entry("sleeps", matchesMap().extraOk()); } + /** + * Fix some of the types on the profile results. Sometimes they + * come back as integers and sometimes longs. This just promotes + * them to long every time. + */ + private void fixTypesOnProfile(Map profile) { + profile.put("iterations", ((Number) profile.get("iterations")).longValue()); + profile.put("cpu_nanos", ((Number) profile.get("cpu_nanos")).longValue()); + profile.put("took_nanos", ((Number) profile.get("took_nanos")).longValue()); + } + private String checkOperatorProfile(Map o) { String name = (String) o.get("operator"); name = name.replaceAll("\\[.+", ""); From 9d6bef1651c0db93b2eacd36ac6bc3adfb389102 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 23 Aug 2024 15:26:46 -0400 Subject: [PATCH 181/389] Docs: Scripted metric not available in serverless (#112161) This updates the docs to say that scripted metric is not available in serverless. 
--- .../metrics/scripted-metric-aggregation.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index d7d837b2f8364..16879450c65d8 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -6,6 +6,8 @@ A metric aggregation that executes using scripts to provide a metric output. +WARNING: `scripted_metric` is not available in {serverless-full}. + WARNING: Using scripts can result in slower search speeds. See <>. @@ -127,7 +129,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is a required script. +map_script:: Executed once per document collected. This is a required script. + In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added @@ -282,4 +284,4 @@ params:: Optional. An object whose contents will be passed as variable If a parent bucket of the scripted metric aggregation does not collect any documents an empty aggregation response will be returned from the shard with a `null` value. In this case the `reduce_script`'s `states` variable will contain `null` as a response from that shard. -`reduce_script`'s should therefore expect and deal with `null` responses from shards. +`reduce_script`'s should therefore expect and deal with `null` responses from shards. From 2c9406861c4b2b0c7274b4aa8c10fa446fb17302 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 23 Aug 2024 13:30:28 -0700 Subject: [PATCH 182/389] Allow query pragmas in release builds (#111953) I have investigated an issue with QA clusters that run release builds. I wish I could enable query pragmas to confirm the problem instead of setting up new clusters and replicating data before testing the theory. This change allows users to enable query pragmas in release builds. However, due to the risks associated with using pragmas, the accept_pragma_risks parameter must be explicitly set to true to proceed. --- .../elasticsearch/xpack/esql/action/EsqlQueryRequest.java | 8 +++++++- .../elasticsearch/xpack/esql/action/RequestXContent.java | 2 ++ .../xpack/esql/action/EsqlQueryRequestTests.java | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 5c9b4244ec0ca..4ab310863c61d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -50,6 +50,7 @@ public class EsqlQueryRequest extends org.elasticsearch.xpack.core.esql.action.E private TimeValue keepAlive = DEFAULT_KEEP_ALIVE; private boolean keepOnCompletion; private boolean onSnapshotBuild = Build.current().isSnapshot(); + private boolean acceptedPragmaRisks = false; /** * "Tables" provided in the request for use with things like {@code LOOKUP}. 
@@ -78,8 +79,9 @@ public ActionRequestValidationException validate() { if (Strings.hasText(query) == false) { validationException = addValidationError("[" + RequestXContent.QUERY_FIELD + "] is required", validationException); } + if (onSnapshotBuild == false) { - if (pragmas.isEmpty() == false) { + if (pragmas.isEmpty() == false && acceptedPragmaRisks == false) { validationException = addValidationError( "[" + RequestXContent.PRAGMA_FIELD + "] only allowed in snapshot builds", validationException @@ -230,4 +232,8 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, void onSnapshotBuild(boolean onSnapshotBuild) { this.onSnapshotBuild = onSnapshotBuild; } + + void acceptedPragmaRisks(boolean accepted) { + this.acceptedPragmaRisks = accepted; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 4c511a4450bc8..810e313002189 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -69,6 +69,7 @@ String fields() { private static final ParseField PARAMS_FIELD = new ParseField("params"); private static final ParseField LOCALE_FIELD = new ParseField("locale"); private static final ParseField PROFILE_FIELD = new ParseField("profile"); + private static final ParseField ACCEPT_PRAGMA_RISKS = new ParseField("accept_pragma_risks"); static final ParseField TABLES_FIELD = new ParseField("tables"); static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); @@ -92,6 +93,7 @@ private static void objectParserCommon(ObjectParser parser) parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); + parser.declareBoolean(EsqlQueryRequest::acceptedPragmaRisks, ACCEPT_PRAGMA_RISKS); parser.declareObject( EsqlQueryRequest::pragmas, (p, c) -> new QueryPragmas(Settings.builder().loadFromMap(p.map()).build()), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index 890a611fdea10..b1dff5ce8c342 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -302,6 +302,9 @@ public void testPragmasOnlyValidOnSnapshot() throws IOException { request.onSnapshotBuild(false); assertNotNull(request.validate()); assertThat(request.validate().getMessage(), containsString("[pragma] only allowed in snapshot builds")); + + request.acceptedPragmaRisks(true); + assertNull(request.validate()); } public void testTablesKeyword() throws IOException { From 150235223e0f9e546d2c9773ad951b9262679754 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 24 Aug 2024 15:59:34 +1000 Subject: [PATCH 183/389] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex SYNC} #112180 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 
a46456a6c9ad9..f68e0f395a9b4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -187,6 +187,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112144 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/112147 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT + method: test {mv_percentile.FromIndex SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112180 # Examples: # From 4c200f60a8e78366ad05aef5590ef9e007fd59ec Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 24 Aug 2024 15:09:50 +0200 Subject: [PATCH 184/389] Make ClusterSettings.BUILT_IN_CLUSTER_SETTINGS immutable (#112185) This should be a final and immutable field. Also, the way this field was constructed was odd; we don't expect any null values for constants. --- .../org/elasticsearch/common/settings/ClusterSettings.java | 7 ++----- .../application/rules/QueryRulesIndexServiceTests.java | 3 ++- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c023b00ec820f..8d9d8452b12bb 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -130,11 +130,8 @@ import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.watcher.ResourceWatcherService; -import java.util.Objects; import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Encapsulates all valid cluster level settings. @@ -205,7 +202,7 @@ public void apply(Settings value, Settings current, Settings previous) { } } - public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Stream.of( + public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Set.of( AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, @@ -602,5 +599,5 @@ public void apply(Settings value, Settings current, Settings previous) { TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE, DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING, DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING - ).filter(Objects::nonNull).collect(Collectors.toSet()); + ); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java index 36d5bb91e619d..4de5445871739 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -50,7 +51,7 @@ public class QueryRulesIndexServiceTests extends ESSingleNodeTestCase { @Before public void setup() { - Set<Setting<?>> settingsSet = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + Set<Setting<?>> settingsSet = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
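// illustrative annotation, not a line of the upstream patch: Set.of(...) produces an immutable set, so the built-in settings must be copied into a mutable HashSet before the addAll call that follows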
settingsSet.addAll(QueryRulesConfig.getSettings()); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); this.queryRulesIndexService = new QueryRulesIndexService(client(), clusterSettings); From 22ca3810f911035ad5cbecf36500423e3c1e54b1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 24 Aug 2024 15:17:11 +0200 Subject: [PATCH 185/389] Make slow logger's Logger instances static (#112183) These can be made static now that they aren't index specific any longer, saving measurable time in test execution + it's just the right thing to do here. --- .../elasticsearch/index/IndexingSlowLog.java | 8 +++++--- .../elasticsearch/index/SearchSlowLog.java | 19 +++++++++---------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 14c2c5440bd24..b4a7571e96802 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -90,7 +90,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexSettingDeprecatedInV7AndRemovedInV8 ); - private final Logger indexLogger; + private static final Logger indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); + static { + Loggers.setLevel(indexLogger, Level.TRACE); + } + private final Index index; private boolean reformat; @@ -127,8 +131,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { IndexingSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { this.slowLogFieldProvider = slowLogFieldProvider; - this.indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); - Loggers.setLevel(this.indexLogger, Level.TRACE); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index eb227e6e1136d..6aff86d32c5a3 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -41,12 +41,17 @@ public final class SearchSlowLog implements SearchOperationListener { private long fetchDebugThreshold; private long fetchTraceThreshold; - private final Logger queryLogger; - private final Logger fetchLogger; + static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; - private final SlowLogFieldProvider slowLogFieldProvider; + private static final Logger queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); + private static final Logger fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); - static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; + static { + Loggers.setLevel(queryLogger, Level.TRACE); + Loggers.setLevel(fetchLogger, Level.TRACE); + } + + private final SlowLogFieldProvider slowLogFieldProvider; public static final Setting INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( INDEX_SEARCH_SLOWLOG_PREFIX + ".include.user", @@ -130,12 +135,6 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { slowLogFieldProvider.init(indexSettings); 
this.slowLogFieldProvider = slowLogFieldProvider; - - this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); - this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); - Loggers.setLevel(this.fetchLogger, Level.TRACE); - Loggers.setLevel(this.queryLogger, Level.TRACE); - indexSettings.getScopedSettings() .addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); From ecd793067bcc646bb51ab70531672722e8ba8692 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 06:47:05 +1000 Subject: [PATCH 186/389] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {mv_percentile.FromIndex SYNC} #112187 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f68e0f395a9b4..c8f7abebfa391 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -190,6 +190,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {mv_percentile.FromIndex SYNC} issue: https://github.com/elastic/elasticsearch/issues/112180 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {mv_percentile.FromIndex SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112187 # Examples: # From d053d39472abbe2cdbbff5077f31a861d2ed03ad Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 06:47:15 +1000 Subject: [PATCH 187/389] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {mv_percentile.FromIndex ASYNC} #112188 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c8f7abebfa391..9708b43fbfdfa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -193,6 +193,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {mv_percentile.FromIndex SYNC} issue: https://github.com/elastic/elasticsearch/issues/112187 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {mv_percentile.FromIndex ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/112188 # Examples: # From 48dabe851c7944fa7b5d92b50485e9dd22906c76 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 14:17:01 +1000 Subject: [PATCH 188/389] Mute org.elasticsearch.smoketest.WatcherYamlRestIT test {p0=watcher/usage/10_basic/Test watcher usage stats output} #112189 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9708b43fbfdfa..0557cc1b7408a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -196,6 +196,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {mv_percentile.FromIndex ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112188 +- class: org.elasticsearch.smoketest.WatcherYamlRestIT + method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} + issue: https://github.com/elastic/elasticsearch/issues/112189 # Examples: # From 18662335ffe00db0e79141b93c3c0bd3021fd585 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 22:44:12 +1000 
Subject: [PATCH 189/389] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_processor/Test create processor with missing mandatory fields} #112191 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 0557cc1b7408a..753e7006863b9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -199,6 +199,9 @@ tests: - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} + issue: https://github.com/elastic/elasticsearch/issues/112191 # Examples: # From 6f539585d56dd1d12f270745ac878309208e4e88 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 26 Aug 2024 06:44:18 +1000 Subject: [PATCH 190/389] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex ASYNC} #112193 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 753e7006863b9..68a53e5cb04dd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -202,6 +202,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} issue: https://github.com/elastic/elasticsearch/issues/112191 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT + method: test {mv_percentile.FromIndex ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/112193 # Examples: # From 29453cb2cea93010654fcc0f6d5936f67d0b0acd Mon Sep 17 00:00:00 2001 From: Panos Koutsovasilis Date: Mon, 26 Aug 2024 08:37:40 +0300 Subject: [PATCH 191/389] fix: support all allowed protocol numbers (#111528) * fix(CommunityIdProcessor): support all allowed protocol numbers * fix(CommunityIdProcessor): update documentation --- .../ingest/processors/community-id.asciidoc | 7 +- .../ingest/common/CommunityIdProcessor.java | 138 +++++++++++------- .../common/CommunityIdProcessorTests.java | 33 ++++- 3 files changed, 116 insertions(+), 62 deletions(-) diff --git a/docs/reference/ingest/processors/community-id.asciidoc b/docs/reference/ingest/processors/community-id.asciidoc index 03e65ac04a209..2d86bd21fa1e9 100644 --- a/docs/reference/ingest/processors/community-id.asciidoc +++ b/docs/reference/ingest/processors/community-id.asciidoc @@ -23,11 +23,12 @@ configuration is required. | `source_port` | no | `source.port` | Field containing the source port. | `destination_ip` | no | `destination.ip` | Field containing the destination IP address. | `destination_port` | no | `destination.port` | Field containing the destination port. -| `iana_number` | no | `network.iana_number` | Field containing the IANA number. The following protocol numbers are currently supported: `1` ICMP, `2` IGMP, `6` TCP, `17` UDP, `47` GRE, `58` ICMP IPv6, `88` EIGRP, `89` OSPF, `103` PIM, and `132` SCTP. +| `iana_number` | no | `network.iana_number` | Field containing the IANA number. | `icmp_type` | no | `icmp.type` | Field containing the ICMP type. | `icmp_code` | no | `icmp.code` | Field containing the ICMP code. -| `transport` | no | `network.transport` | Field containing the transport protocol. -Used only when the `iana_number` field is not present. 
+| `transport` | no | `network.transport` | Field containing the transport protocol name or number. +Used only when the `iana_number` field is not present. The following protocol names are currently supported: +`ICMP`, `IGMP`, `TCP`, `UDP`, `GRE`, `ICMP IPv6`, `EIGRP`, `OSPF`, `PIM`, and `SCTP`. | `target_field` | no | `network.community_id` | Output field for the community ID. | `seed` | no | `0` | Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The seed can prevent hash collisions between network domains, such as diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java index 27ef5a10dd5c2..0377da53846d5 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java @@ -225,7 +225,7 @@ private static Flow buildFlow( } flow.protocol = Transport.fromObject(protocol); - switch (flow.protocol) { + switch (flow.protocol.getType()) { case Tcp, Udp, Sctp -> { flow.sourcePort = parseIntFromObjectOrString(sourcePort.get(), "source port"); if (flow.sourcePort < 1 || flow.sourcePort > 65535) { @@ -336,12 +336,12 @@ public CommunityIdProcessor create( */ public static final class Flow { - private static final List TRANSPORTS_WITH_PORTS = List.of( - Transport.Tcp, - Transport.Udp, - Transport.Sctp, - Transport.Icmp, - Transport.IcmpIpV6 + private static final List TRANSPORTS_WITH_PORTS = List.of( + Transport.Type.Tcp, + Transport.Type.Udp, + Transport.Type.Sctp, + Transport.Type.Icmp, + Transport.Type.IcmpIpV6 ); InetAddress source; @@ -362,20 +362,21 @@ boolean isOrdered() { } byte[] toBytes() { - boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protocol); + Transport.Type protoType = protocol.getType(); + boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protoType); int len = source.getAddress().length + destination.getAddress().length + 2 + (hasPort ? 4 : 0); ByteBuffer bb = ByteBuffer.allocate(len); boolean isOneWay = false; - if (protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) { + if (protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) { // ICMP protocols populate port fields with ICMP data - Integer equivalent = IcmpType.codeEquivalent(icmpType, protocol == Transport.IcmpIpV6); + Integer equivalent = IcmpType.codeEquivalent(icmpType, protoType == Transport.Type.IcmpIpV6); isOneWay = equivalent == null; sourcePort = icmpType; destinationPort = equivalent == null ? icmpCode : equivalent; } - boolean keepOrder = isOrdered() || ((protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) && isOneWay); + boolean keepOrder = isOrdered() || ((protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) && isOneWay); bb.put(keepOrder ? source.getAddress() : destination.getAddress()); bb.put(keepOrder ? 
destination.getAddress() : source.getAddress());
         bb.put(toUint16(protocol.getTransportNumber() << 8));
@@ -397,39 +398,63 @@ String toCommunityId(byte[] seed) {
         }
     }
 
-    public enum Transport {
-        Icmp(1),
-        Igmp(2),
-        Tcp(6),
-        Udp(17),
-        Gre(47),
-        IcmpIpV6(58),
-        Eigrp(88),
-        Ospf(89),
-        Pim(103),
-        Sctp(132);
-
-        private final int transportNumber;
+    static class Transport {
+        public enum Type {
+            Unknown(-1),
+            Icmp(1),
+            Igmp(2),
+            Tcp(6),
+            Udp(17),
+            Gre(47),
+            IcmpIpV6(58),
+            Eigrp(88),
+            Ospf(89),
+            Pim(103),
+            Sctp(132);
+
+            private final int transportNumber;
+
+            private static final Map TRANSPORT_NAMES;
+
+            static {
+                TRANSPORT_NAMES = new HashMap<>();
+                TRANSPORT_NAMES.put("icmp", Icmp);
+                TRANSPORT_NAMES.put("igmp", Igmp);
+                TRANSPORT_NAMES.put("tcp", Tcp);
+                TRANSPORT_NAMES.put("udp", Udp);
+                TRANSPORT_NAMES.put("gre", Gre);
+                TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6);
+                TRANSPORT_NAMES.put("icmpv6", IcmpIpV6);
+                TRANSPORT_NAMES.put("eigrp", Eigrp);
+                TRANSPORT_NAMES.put("ospf", Ospf);
+                TRANSPORT_NAMES.put("pim", Pim);
+                TRANSPORT_NAMES.put("sctp", Sctp);
+            }
 
-        private static final Map TRANSPORT_NAMES;
+            Type(int transportNumber) {
+                this.transportNumber = transportNumber;
+            }
 
-        static {
-            TRANSPORT_NAMES = new HashMap<>();
-            TRANSPORT_NAMES.put("icmp", Icmp);
-            TRANSPORT_NAMES.put("igmp", Igmp);
-            TRANSPORT_NAMES.put("tcp", Tcp);
-            TRANSPORT_NAMES.put("udp", Udp);
-            TRANSPORT_NAMES.put("gre", Gre);
-            TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6);
-            TRANSPORT_NAMES.put("icmpv6", IcmpIpV6);
-            TRANSPORT_NAMES.put("eigrp", Eigrp);
-            TRANSPORT_NAMES.put("ospf", Ospf);
-            TRANSPORT_NAMES.put("pim", Pim);
-            TRANSPORT_NAMES.put("sctp", Sctp);
+            public int getTransportNumber() {
+                return transportNumber;
+            }
         }
 
-        Transport(int transportNumber) {
+        private Type type;
+        private int transportNumber;
+
+        Transport(int transportNumber, Type type) {
             this.transportNumber = transportNumber;
+            this.type = type;
+        }
+
+        Transport(Type type) {
+            this.transportNumber = type.getTransportNumber();
+            this.type = type;
+        }
+
+        public Type getType() {
+            return this.type;
         }
 
         public int getTransportNumber() {
@@ -437,19 +462,26 @@ public int getTransportNumber() {
         }
 
         public static Transport fromNumber(int transportNumber) {
-            return switch (transportNumber) {
-                case 1 -> Icmp;
-                case 2 -> Igmp;
-                case 6 -> Tcp;
-                case 17 -> Udp;
-                case 47 -> Gre;
-                case 58 -> IcmpIpV6;
-                case 88 -> Eigrp;
-                case 89 -> Ospf;
-                case 103 -> Pim;
-                case 132 -> Sctp;
-                default -> throw new IllegalArgumentException("unknown transport protocol number [" + transportNumber + "]");
+            if (transportNumber < 0 || transportNumber >= 255) {
+                // transport numbers range https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+                throw new IllegalArgumentException("invalid transport protocol number [" + transportNumber + "]");
+            }
+
+            Type type = switch (transportNumber) {
+                case 1 -> Type.Icmp;
+                case 2 -> Type.Igmp;
+                case 6 -> Type.Tcp;
+                case 17 -> Type.Udp;
+                case 47 -> Type.Gre;
+                case 58 -> Type.IcmpIpV6;
+                case 88 -> Type.Eigrp;
+                case 89 -> Type.Ospf;
+                case 103 -> Type.Pim;
+                case 132 -> Type.Sctp;
+                default -> Type.Unknown;
             };
+
+            return new Transport(transportNumber, type);
         }
 
         public static Transport fromObject(Object o) {
@@ -457,8 +489,8 @@ public static Transport fromObject(Object o) {
             return fromNumber(number.intValue());
         } else if (o instanceof String protocolStr) {
             // check if matches protocol name
-            if
(TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { - return TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT)); + if (Type.TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { + return new Transport(Type.TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT))); } // check if convertible to protocol number diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java index ca9b3f3d81bd9..3848f4531adcb 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java @@ -166,14 +166,30 @@ public void testBeatsProtocolNumber() throws Exception { testCommunityIdProcessor(event, "1:D3t8Q1aFA6Ev0A/AO4i9PnU3AeI="); } - public void testBeatsIanaNumber() throws Exception { + public void testBeatsIanaNumberProtocolTCP() throws Exception { @SuppressWarnings("unchecked") var network = (Map) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); } + public void testBeatsIanaNumberProtocolIPv4() throws Exception { + @SuppressWarnings("unchecked") + var network = (Map) event.get("network"); + network.put("iana_number", "4"); + network.remove("transport"); + @SuppressWarnings("unchecked") + var source = (Map) event.get("source"); + source.put("ip", "192.168.1.2"); + source.remove("port"); + @SuppressWarnings("unchecked") + var destination = (Map) event.get("destination"); + destination.put("ip", "10.1.2.3"); + destination.remove("port"); + testCommunityIdProcessor(event, "1:KXQzmk3bdsvD6UXj7dvQ4bM6Zvw="); + } + public void testIpv6() throws Exception { @SuppressWarnings("unchecked") var source = (Map) event.get("source"); @@ -201,10 +217,10 @@ public void testStringAndNumber() throws Exception { @SuppressWarnings("unchecked") var network = (Map) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); - network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Tcp.getTransportNumber())); + network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber())); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); // protocol number @@ -359,8 +375,13 @@ private void testCommunityIdProcessor(Map source, int seed, Stri } public void testTransportEnum() { - for (CommunityIdProcessor.Transport t : CommunityIdProcessor.Transport.values()) { - assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()), equalTo(t)); + for (CommunityIdProcessor.Transport.Type t : CommunityIdProcessor.Transport.Type.values()) { + if (t == CommunityIdProcessor.Transport.Type.Unknown) { + expectThrows(IllegalArgumentException.class, () -> CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber())); + continue; + } + + assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()).getType(), equalTo(t)); } } From 
f318f2234676a57f3e03333a4c2ee516d832fb37 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Mon, 26 Aug 2024 08:39:22 +0200 Subject: [PATCH 192/389] Give executor to cache instead of string (#111711) (#112091) Relates ES-8155 Co-authored-by: Iraklis Psaroudakis --- .../shared/SharedBlobCacheService.java | 6 ++-- .../shared/SharedBlobCacheServiceTests.java | 34 +++++++++---------- .../SearchableSnapshots.java | 2 +- .../AbstractSearchableSnapshotsTestCase.java | 6 ++-- .../store/input/FrozenIndexInputTests.java | 2 +- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 584e551f1cf6b..6a55738b864d1 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -333,7 +333,7 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics ) { this(environment, settings, threadPool, ioExecutor, blobCacheMetrics, System::nanoTime); @@ -343,12 +343,12 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics, LongSupplier relativeTimeInNanosSupplier ) { this.threadPool = threadPool; - this.ioExecutor = threadPool.executor(ioExecutor); + this.ioExecutor = ioExecutor; long totalFsSize; try { totalFsSize = FsProbe.getTotal(Environment.getFileStore(environment.nodeDataPaths()[0])); diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index 6c49b50c06e82..346950d385a40 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -94,7 +94,7 @@ public void testBasicEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -175,7 +175,7 @@ public void testAutoEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -219,7 +219,7 @@ public void testForceEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -253,7 +253,7 @@ public void testForceEvictResponse() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -287,7 +287,7 @@ public void testDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -395,7 +395,7 @@ public 
void testMassiveDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -470,7 +470,7 @@ public void testGetMultiThreaded() throws IOException { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -550,7 +550,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -618,7 +618,7 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -826,7 +826,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -844,7 +844,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -869,7 +869,7 @@ public void testMaybeEvictLeastUsed() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -967,7 +967,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1117,7 +1117,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1278,7 +1278,7 @@ public void testPopulate() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1394,7 +1394,7 @@ public void testUseFullRegionSize() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) { @Override @@ -1435,7 +1435,7 @@ public void testSharedSourceInputStreamFactory() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 18ebe65d87986..4eea006b4c2f2 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -331,7 +331,7 @@ public Collection createComponents(PluginServices services) { nodeEnvironment, settings, threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), new 
BlobCacheMetrics(services.telemetryProvider().getMeterRegistry())
         );
         this.frozenCacheService.set(sharedBlobCacheService);
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
index 5f083d568fed8..41121453e41a4 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
@@ -144,7 +144,7 @@ protected SharedBlobCacheService defaultFrozenCacheService() {
             nodeEnvironment,
             Settings.EMPTY,
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
     }
@@ -167,7 +167,7 @@ protected SharedBlobCacheService randomFrozenCacheService() {
             singlePathNodeEnvironment,
             cacheSettings.build(),
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
     }
@@ -192,7 +192,7 @@ protected SharedBlobCacheService createFrozenCacheService(final ByteSi
                 .put(SharedBlobCacheService.SHARED_CACHE_RANGE_SIZE_SETTING.getKey(), cacheRangeSize)
                 .build(),
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
            BlobCacheMetrics.NOOP
         );
     }
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
index 81e9c06a149b9..53ea908ad8801 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
@@ -111,7 +111,7 @@ public void testRandomReads() throws IOException {
             nodeEnvironment,
             settings,
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
         CacheService cacheService = randomCacheService();

From 32b4aa3c448ed5c98854d29d983f903c94649a07 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 26 Aug 2024 13:52:20 +0700
Subject: [PATCH 193/389] Fix TSDBIndexingIT#testTrimId() test failure.
 (#112194)

Sometimes initial indexing results in exactly one segment.
However, multiple segments are needed to perform the force merge that purges stored fields
for the _id field in a later stage of the test.

This change tweaks the test such that an extra update is performed after initial indexing.
This should always create an extra segment, so that this test can actually purge stored fields
for the _id field.
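For illustration only (a rough sketch of the idea; `indexName`, `doc` and
`lastSeqNo` are placeholder names, not part of this patch): updating a
document that was already flushed is written into a fresh in-memory segment,
so the next flush yields a second segment even when the initial bulk requests
happened to land in a single one.

    // hypothetical sketch: touch the last indexed doc to force a new segment
    var update = new IndexRequest(indexName).setIfPrimaryTerm(1L)
        .setIfSeqNo(lastSeqNo)
        .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    update.source(doc, XContentType.JSON);
    assertThat(client().index(update).actionGet().getVersion(), equalTo(2L));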
Closes #112124 --- .../elasticsearch/datastreams/TSDBIndexingIT.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 24c373df72144..a0a0681dbd245 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -35,6 +36,7 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -457,6 +459,16 @@ public void testTrimId() throws Exception { indexName = bulkResponse.getItems()[0].getIndex(); } client().admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet(); + + // In rare cases we can end up with a single segment shard, which means we can't trim away the _id later. + // So update an existing doc to create a new segment without adding a new document after force merging: + var indexRequest = new IndexRequest(indexName).setIfPrimaryTerm(1L) + .setIfSeqNo((numBulkRequests * numDocsPerBulk) - 1) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source(DOC.replace("$time", formatInstant(time.minusMillis(1))), XContentType.JSON); + var res = client().index(indexRequest).actionGet(); + assertThat(res.status(), equalTo(RestStatus.OK)); + assertThat(res.getVersion(), equalTo(2L)); } // Check whether there are multiple segments: @@ -494,7 +506,7 @@ public void testTrimId() throws Exception { assertThat(retentionLeasesStats.retentionLeases().leases(), hasSize(1)); assertThat( retentionLeasesStats.retentionLeases().leases().iterator().next().retainingSequenceNumber(), - equalTo((long) numBulkRequests * numDocsPerBulk) + equalTo((long) numBulkRequests * numDocsPerBulk + 1) ); }); From 44758b3823a4982e9c62598f184cf90b67ab5d2a Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 26 Aug 2024 10:00:39 +0300 Subject: [PATCH 194/389] Restore useAlternatingSort in `MergingDigest` (#112148) --- .../src/main/java/org/elasticsearch/tdigest/MergingDigest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java index 172b0f24dfd99..fc22bda52e104 100644 --- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java +++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java @@ -92,7 +92,7 @@ public class MergingDigest extends AbstractTDigest { private final int[] order; // if true, alternate upward and downward merge passes - public 
boolean useAlternatingSort = false; + public boolean useAlternatingSort = true; // if true, use higher working value of compression during construction, then reduce on presentation public boolean useTwoLevelCompression = true; From 54e910636f009034fdb4e6cb6bf279aeee272bdc Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 11:33:37 +0200 Subject: [PATCH 195/389] Unmute SQL CSV spec tests (#112196) Unmute SQL CSV Spec tests fixed by https://github.com/elastic/elasticsearch/pull/111938 --- muted-tests.yml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 68a53e5cb04dd..77ec7800f8a4d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -128,42 +128,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval: monthly #110916}" issue: https://github.com/elastic/elasticsearch/issues/111902 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {date.testDateParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111921 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {datetime.testDateTimeParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111922 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {datetime.testDateTimeParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111922 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {date.testDateParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111921 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT method: testScaledFloat issue: https://github.com/elastic/elasticsearch/issues/112003 From 6b96226140064bcf14d5eb4a696a30be8a4e48cf 
Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 26 Aug 2024 11:44:42 +0200 Subject: [PATCH 196/389] Fix NPE when executing doc value queries over shape geometries with empty segments (#112139) Return empty scorer instead of null. --- docs/changelog/112139.yaml | 6 ++++ .../lucene/spatial/ShapeDocValuesQuery.java | 17 +++------ .../CartesianShapeDocValuesQueryTests.java | 36 +++++++++++++++++++ .../LatLonShapeDocValuesQueryTests.java | 35 ++++++++++++++++++ 4 files changed, 82 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/112139.yaml diff --git a/docs/changelog/112139.yaml b/docs/changelog/112139.yaml new file mode 100644 index 0000000000000..d6d992ec1dcf2 --- /dev/null +++ b/docs/changelog/112139.yaml @@ -0,0 +1,6 @@ +pr: 112139 +summary: Fix NPE when executing doc value queries over shape geometries with empty + segments +area: Geo +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java index 6804901d9511e..f79d5303ab65a 100644 --- a/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java @@ -15,6 +15,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; @@ -109,11 +110,7 @@ private ConstantScoreWeight getStandardWeight(ScoreMode scoreMode, float boost) @Override public Scorer scorer(LeafReaderContext context) throws IOException { - final ScorerSupplier scorerSupplier = scorerSupplier(context); - if (scorerSupplier == null) { - return null; - } - return scorerSupplier.get(Long.MAX_VALUE); + return scorerSupplier(context).get(Long.MAX_VALUE); } @Override @@ -127,7 +124,7 @@ public Scorer get(long leadCost) throws IOException { // binary doc values allocate an array upfront, lets only allocate it if we are going to use it final BinaryDocValues values = context.reader().getBinaryDocValues(field); if (values == null) { - return null; + return new ConstantScoreScorer(weight, 0f, scoreMode, DocIdSetIterator.empty()); } final GeometryDocValueReader reader = new GeometryDocValueReader(); final Component2DVisitor visitor = Component2DVisitor.getVisitor(component2D, relation, encoder); @@ -171,11 +168,7 @@ private ConstantScoreWeight getContainsWeight(ScoreMode scoreMode, float boost) @Override public Scorer scorer(LeafReaderContext context) throws IOException { - final ScorerSupplier scorerSupplier = scorerSupplier(context); - if (scorerSupplier == null) { - return null; - } - return scorerSupplier.get(Long.MAX_VALUE); + return scorerSupplier(context).get(Long.MAX_VALUE); } @Override @@ -189,7 +182,7 @@ public Scorer get(long leadCost) throws IOException { // binary doc values allocate an array upfront, lets only allocate it if we are going to use it final BinaryDocValues values = context.reader().getBinaryDocValues(field); if (values == null) { - return null; + return new ConstantScoreScorer(weight, 0f, scoreMode, DocIdSetIterator.empty()); } final Component2DVisitor[] visitors = new Component2DVisitor[components2D.size()]; for (int i = 0; i < components2D.size(); i++) { diff --git 
a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java index 4ce3d87d6420d..9ee84fcaa352f 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergeScheduler; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -30,6 +31,7 @@ import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geo.XShapeTestUtil; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.index.mapper.ShapeIndexer; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -54,6 +56,40 @@ public void testEqualsAndHashcode() { QueryUtils.checkUnequal(q1, q4); } + public void testEmptySegment() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // No merges + iwc.setMergeScheduler(NoMergeScheduler.INSTANCE); + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, iwc); + ShapeIndexer indexer = new CartesianShapeIndexer(FIELD_NAME); + Geometry geometry = new org.elasticsearch.geometry.Point(0, 0); + Document document = new Document(); + List fields = indexer.indexShape(geometry); + for (IndexableField field : fields) { + document.add(field); + } + BinaryShapeDocValuesField docVal = new BinaryShapeDocValuesField(FIELD_NAME, CoordinateEncoder.CARTESIAN); + docVal.add(fields, geometry); + document.add(docVal); + w.addDocument(document); + w.flush(); + // add empty segment + w.addDocument(new Document()); + w.flush(); + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + XYRectangle rectangle = new XYRectangle(-10, 10, -10, 10); + for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { + Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, rectangle); + Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, rectangle); + assertQueries(s, indexQuery, docValQuery, 1); + } + IOUtils.close(r, dir); + } + public void testIndexSimpleShapes() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java index 99fab30e3ade2..e00b7fa4736b3 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergeScheduler; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -56,6 +57,40 @@ public void testEqualsAndHashcode() { QueryUtils.checkUnequal(q1, q4); } + public void testEmptySegment() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // 
No merges + iwc.setMergeScheduler(NoMergeScheduler.INSTANCE); + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, iwc); + GeoShapeIndexer indexer = new GeoShapeIndexer(Orientation.CCW, FIELD_NAME); + Geometry geometry = new org.elasticsearch.geometry.Point(0, 0); + Document document = new Document(); + List fields = indexer.indexShape(geometry); + for (IndexableField field : fields) { + document.add(field); + } + BinaryShapeDocValuesField docVal = new BinaryShapeDocValuesField(FIELD_NAME, CoordinateEncoder.GEO); + docVal.add(fields, geometry); + document.add(docVal); + w.addDocument(document); + w.flush(); + // add empty segment + w.addDocument(new Document()); + w.flush(); + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + Rectangle rectangle = new Rectangle(-10, 10, -10, 10); + for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { + Query indexQuery = LatLonShape.newGeometryQuery(FIELD_NAME, relation, rectangle); + Query docValQuery = new LatLonShapeDocValuesQuery(FIELD_NAME, relation, rectangle); + assertQueries(s, indexQuery, docValQuery, 1); + } + IOUtils.close(r, dir); + } + public void testIndexSimpleShapes() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: From 785fe5384bda21ff1f7ba52e0fdcf2061506a983 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Mon, 26 Aug 2024 12:56:08 +0300 Subject: [PATCH 197/389] Adding support for allow_partial_search_results in PIT (#111516) --- docs/changelog/111516.yaml | 5 + .../search/point-in-time-api.asciidoc | 38 ++++ .../paginate-search-results.asciidoc | 12 +- .../action/search/PointInTimeIT.java | 205 +++++++++++++++++- .../org/elasticsearch/TransportVersions.java | 1 + .../search/AbstractSearchAsyncAction.java | 2 +- .../action/search/ClearScrollController.java | 4 + .../action/search/OpenPointInTimeRequest.java | 26 ++- .../search/OpenPointInTimeResponse.java | 42 +++- .../search/RestOpenPointInTimeAction.java | 1 + .../action/search/SearchContextId.java | 29 ++- .../action/search/SearchContextIdForNode.java | 46 +++- .../TransportOpenPointInTimeAction.java | 40 +++- .../action/search/TransportSearchAction.java | 58 +++-- .../RestOpenPointInTimeActionTests.java | 2 +- .../action/search/SearchContextIdTests.java | 86 ++++++-- .../search/TransportSearchActionTests.java | 6 +- .../elasticsearch/test/ESIntegTestCase.java | 7 + .../execution/sample/CircuitBreakerTests.java | 2 +- .../search/PITAwareQueryClientTests.java | 2 +- .../sequence/CircuitBreakerTests.java | 2 +- .../authz/AuthorizationServiceTests.java | 3 +- .../xpack/sql/analysis/CancellationTests.java | 2 +- .../ClientTransformIndexerTests.java | 2 +- 24 files changed, 530 insertions(+), 93 deletions(-) create mode 100644 docs/changelog/111516.yaml diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml new file mode 100644 index 0000000000000..96e8bd843f750 --- /dev/null +++ b/docs/changelog/111516.yaml @@ -0,0 +1,5 @@ +pr: 111516 +summary: Adding support for `allow_partial_search_results` in PIT +area: Search +type: enhancement +issues: [] diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 2e32324cb44d9..9cd91626c7600 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -78,6 +78,44 @@ IMPORTANT: The open point in time request and each subsequent search request can return different `id`; 
thus always use the most recently received `id` for the next search request. +In addition to the `keep_alive` parameter, the `allow_partial_search_results` parameter +can also be defined. +This parameter determines whether the <> +should tolerate unavailable shards or <> when +initially creating the PIT. +If set to true, the PIT will be created with the available shards, along with a +reference to any missing ones. +If set to false, the operation will fail if any shard is unavailable. +The default value is false. + +The PIT response includes a summary of the total number of shards, as well as the number +of successful shards when creating the PIT. + +[source,console] +-------------------------------------------------- +POST /my-index-000001/_pit?keep_alive=1m&allow_partial_search_results=true +-------------------------------------------------- +// TEST[setup:my_index] + +[source,js] +-------------------------------------------------- +{ + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=", + "_shards": { + "total": 10, + "successful": 10, + "skipped": 0, + "failed": 0 + } +} +-------------------------------------------------- +// NOTCONSOLE + +When a PIT that contains shard failures is used in a search request, the missing are +always reported in the search response as a NoShardAvailableActionException exception. +To get rid of these exceptions, a new PIT needs to be created so that shards missing +from the previous PIT can be handled, assuming they become available in the meantime. + [[point-in-time-keep-alive]] ==== Keeping point in time alive The `keep_alive` parameter, which is passed to a open point in time request and diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index edd1546dd0854..f69fd60be0484 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -106,9 +106,9 @@ The search response includes an array of `sort` values for each hit: "_id" : "654322", "_score" : null, "_source" : ..., - "sort" : [ + "sort" : [ 1463538855, - "654322" + "654322" ] }, { @@ -118,7 +118,7 @@ The search response includes an array of `sort` values for each hit: "_source" : ..., "sort" : [ <1> 1463538857, - "654323" + "654323" ] } ] @@ -150,7 +150,7 @@ GET twitter/_search -------------------------------------------------- //TEST[continued] -Repeat this process by updating the `search_after` array every time you retrieve a +Repeat this process by updating the `search_after` array every time you retrieve a new page of results. If a <> occurs between these requests, the order of your results may change, causing inconsistent results across pages. To prevent this, you can create a <> to @@ -167,10 +167,12 @@ The API returns a PIT ID. [source,console-result] ---- { - "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==" + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + "_shards": ... 
} ---- // TESTRESPONSE[s/"id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="/"id": $body.id/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards"/] To get the first page of results, submit a search request with a `sort` argument. If using a PIT, specify the PIT ID in the `pit.id` parameter and omit diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index a9a5bb074c9ac..da2dfc50d7fe9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -10,12 +10,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -54,11 +58,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; public class PointInTimeIT extends ESIntegTestCase { @@ -84,7 +91,7 @@ public void testBasic() { prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); @@ -130,7 +137,7 @@ public void testMultipleIndices() { prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { int moreDocs = randomIntBetween(10, 50); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { @@ -212,7 +219,7 @@ public void testRelocation() throws 
Exception { prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); @@ -264,7 +271,7 @@ public void testPointInTimeNotFound() throws Exception { prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)).getPointInTimeId(); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { @@ -305,7 +312,7 @@ public void testIndexNotFound() { prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse( prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), @@ -348,7 +355,7 @@ public void testCanMatch() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); - BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { for (String node : internalCluster().nodesInclude("test")) { for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { @@ -415,7 +422,7 @@ public void testPartialResults() throws Exception { prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); @@ -447,7 +454,7 @@ public void testPITTiebreak() throws Exception { } } refresh("index-*"); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)).getPointInTimeId(); try { for (int size = 1; size <= numIndex; size++) { SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC; @@ -532,6 +539,176 @@ public void testOpenPITConcurrentShardRequests() throws Exception { } } + public void testMissingShardsWithPointInTime() throws Exception { + final Settings nodeAttributes = Settings.builder().put("node.attr.foo", "bar").build(); + final String masterNode = internalCluster().startMasterOnlyNode(nodeAttributes); + List<String> dataNodes = internalCluster().startDataOnlyNodes(2, nodeAttributes); + + final String index = "my_test_index"; + // tried to have randomIntBetween(3, 10) but having more shards than 3 was taking forever and throwing timeouts + final int numShards = 3; + final int numReplicas = 0; + // create an index with numShards shards and 0 replicas + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put("index.routing.allocation.require.foo", "bar") + .build() + ); + + // index some documents + int numDocs = randomIntBetween(10, 50); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(index); + + // create a PIT when all shards are present + OpenPointInTimeResponse pointInTimeResponse = openPointInTime(new String[] { index }, TimeValue.timeValueMinutes(1)); + try { + // ensure that the PIT created has all the shards there + assertThat(numShards, equalTo(pointInTimeResponse.getTotalShards())); + assertThat(numShards, equalTo(pointInTimeResponse.getSuccessfulShards())); + assertThat(0, equalTo(pointInTimeResponse.getFailedShards())); + assertThat(0, equalTo(pointInTimeResponse.getSkippedShards())); + + // make a request using the above PIT + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponse.getPointInTimeId())), + resp -> { + // ensure that all docs are returned + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponse.getPointInTimeId())); + assertHitCount(resp, numDocs); + } + ); + + // pick a random data node to shut down + final String randomDataNode = randomFrom(dataNodes); + + // find which shards to relocate + final String nodeId = admin().cluster().prepareNodesInfo(randomDataNode).get().getNodes().get(0).getNode().getId(); + Set<Integer> shardsToRelocate = new HashSet<>(); + for (ShardStats stats : admin().indices().prepareStats(index).get().getShards()) { + if (nodeId.equals(stats.getShardRouting().currentNodeId())) { + shardsToRelocate.add(stats.getShardRouting().shardId().id()); + } + } + + final int shardsRemoved = shardsToRelocate.size(); + + // shut down the random data node + internalCluster().stopNode(randomDataNode); + + // ensure that the index is red + ensureRed(index); + + // verify that not all documents can now be retrieved + assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + }); + + // create a PIT when some shards are missing + OpenPointInTimeResponse pointInTimeResponseOneNodeDown = openPointInTime( + new String[] { index }, + TimeValue.timeValueMinutes(10), + true + ); + try { + // assert that some shards are indeed missing from PIT + assertThat(pointInTimeResponseOneNodeDown.getTotalShards(), equalTo(numShards)); +
assertThat(pointInTimeResponseOneNodeDown.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(pointInTimeResponseOneNodeDown.getFailedShards(), equalTo(shardsRemoved)); + assertThat(pointInTimeResponseOneNodeDown.getSkippedShards(), equalTo(0)); + + // ensure that the response now contains fewer documents than the total number of indexed documents + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())), + resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId())); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + } + ); + + // add another node to the cluster and re-allocate the shards + final String newNodeName = internalCluster().startDataOnlyNode(nodeAttributes); + try { + for (int shardId : shardsToRelocate) { + ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand(index, shardId, newNodeName, true)); + } + ensureGreen(TimeValue.timeValueMinutes(2), index); + + // index some more documents + for (int i = numDocs; i < numDocs * 2; i++) { + String id = Integer.toString(i); + prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(index); + + // ensure that we now see at least numDocs results from the updated index + assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards)); + assertThat(resp.getFailedShards(), equalTo(0)); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, greaterThan((long) numDocs)); + }); + + // ensure that when using the previously created PIT, we'd see the same number of documents as before regardless of the + // newly indexed documents + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())), + resp -> { + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId())); + assertThat(resp.getTotalShards(), equalTo(numShards)); + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertThat(resp.getShardFailures().length, equalTo(shardsRemoved)); + for (var failure : resp.getShardFailures()) { + assertTrue(shardsToRelocate.contains(failure.shardId())); + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + } + assertNotNull(resp.getHits().getTotalHits()); + // we expect fewer documents, as the newly indexed ones should not be part of the PIT + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + } + ); + + Exception exc = expectThrows( + Exception.class, + () -> prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())) + .setAllowPartialSearchResults(false) + .get() + ); + assertThat(exc.getCause().getMessage(), containsString("missing shards")); + + } finally { + internalCluster().stopNode(newNodeName); + } + } finally { + closePointInTime(pointInTimeResponseOneNodeDown.getPointInTimeId()); + } + + } finally { +
closePointInTime(pointInTimeResponse.getPointInTimeId()); + internalCluster().stopNode(masterNode); + for (String dataNode : dataNodes) { + internalCluster().stopNode(dataNode); + } + } + } + @SuppressWarnings({ "rawtypes", "unchecked" }) private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int size, SortBuilder... sorts) throws Exception { Set seen = new HashSet<>(); @@ -590,10 +767,14 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s assertThat(seen.size(), equalTo(expectedNumDocs)); } - private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { - OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - return response.getPointInTimeId(); + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive) { + return openPointInTime(indices, keepAlive, false); + } + + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive, boolean allowPartialSearchResults) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive) + .allowPartialSearchResults(allowPartialSearchResults); + return client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); } private void closePointInTime(BytesReference readerId) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 9fe270e933785..33a16797e7e23 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -195,6 +195,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); + public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 4fd551994e2a0..1e5b5ebbefe48 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -707,7 +707,7 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At final String scrollId = request.scroll() != null ? 
TransportSearchHelper.buildScrollId(queryResults) : null; final BytesReference searchContextId; if (buildPointInTimeFromSearchResults()) { - searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion); + searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion, failures); } else { if (request.source() != null && request.source().pointInTimeBuilder() != null diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index 04573f72068f3..965b19a69b858 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -166,6 +166,10 @@ public static void closeContexts( final var successes = new AtomicInteger(); try (RefCountingRunnable refs = new RefCountingRunnable(() -> l.onResponse(successes.get()))) { for (SearchContextIdForNode contextId : contextIds) { + if (contextId.getNode() == null) { + // the shard was missing when creating the PIT, ignore. + continue; + } final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); if (node != null) { try { diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index a1cd4df25a25c..146418839f063 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -41,6 +41,8 @@ public final class OpenPointInTimeRequest extends ActionRequest implements Indic private QueryBuilder indexFilter; + private boolean allowPartialSearchResults = false; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = SearchRequest.DEFAULT_INDICES_OPTIONS; public OpenPointInTimeRequest(String... 
indices) { @@ -60,6 +62,9 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + this.allowPartialSearchResults = in.readBoolean(); + } } @Override @@ -76,6 +81,11 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalWriteable(indexFilter); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeBoolean(allowPartialSearchResults); + } else if (allowPartialSearchResults) { + throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion()); + } } @Override @@ -180,6 +190,15 @@ public boolean includeDataStreams() { return true; } + public boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public OpenPointInTimeRequest allowPartialSearchResults(boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + return this; + } + @Override public String getDescription() { return "open search context: indices [" + String.join(",", indices) + "] keep_alive [" + keepAlive + "]"; @@ -200,6 +219,8 @@ public String toString() { + ", preference='" + preference + '\'' + + ", allowPartialSearchResults=" + + allowPartialSearchResults + '}'; } @@ -218,12 +239,13 @@ public boolean equals(Object o) { && indicesOptions.equals(that.indicesOptions) && keepAlive.equals(that.keepAlive) && Objects.equals(routing, that.routing) - && Objects.equals(preference, that.preference); + && Objects.equals(preference, that.preference) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults); } @Override public int hashCode() { - int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference); + int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference, allowPartialSearchResults); result = 31 * result + Arrays.hashCode(indices); return result; } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index dafcee894c9a6..4a4c0252fb109 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,22 +19,46 @@ import java.util.Base64; import java.util.Objects; +import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; + public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { private final BytesReference pointInTimeId; - public OpenPointInTimeResponse(BytesReference pointInTimeId) { + private final int totalShards; + private final int successfulShards; + private final int failedShards; + private final int skippedShards; + + public OpenPointInTimeResponse( + BytesReference pointInTimeId, + int totalShards, + int successfulShards, + int 
failedShards, + int skippedShards + ) { this.pointInTimeId = Objects.requireNonNull(pointInTimeId, "Point in time parameter must be not null"); + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + this.skippedShards = skippedShards; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBytesReference(pointInTimeId); + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(failedShards); + out.writeVInt(skippedShards); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("id", Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId))); + buildBroadcastShardsHeader(builder, params, totalShards, successfulShards, failedShards, skippedShards, null); builder.endObject(); return builder; } @@ -42,4 +67,19 @@ public BytesReference getPointInTimeId() { return pointInTimeId; } + public int getTotalShards() { + return totalShards; + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getFailedShards() { + return failedShards; + } + + public int getSkippedShards() { + return skippedShards; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 0e7f3f9111842..5966a1c924745 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -47,6 +47,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC openRequest.routing(request.param("routing")); openRequest.preference(request.param("preference")); openRequest.keepAlive(TimeValue.parseTimeValue(request.param("keep_alive"), null, "keep_alive")); + openRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", false)); if (request.hasParam("max_concurrent_shard_requests")) { final int maxConcurrentShardRequests = request.paramAsInt( "max_concurrent_shard_requests", diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 95d22e8a9034e..2e4dc724413ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -58,12 +59,30 @@ public boolean contains(ShardSearchContextId contextId) { public static BytesReference encode( List searchPhaseResults, Map aliasFilter, - TransportVersion version + TransportVersion version, + ShardSearchFailure[] shardFailures ) { + assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT) + : "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version [" + + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT + + "] or higher."; try (var out = 
new BytesStreamOutput()) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); - out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + int shardSize = searchPhaseResults.size() + (allowNullContextId ? shardFailures.length : 0); + out.writeVInt(shardSize); + for (var searchResult : searchPhaseResults) { + final SearchShardTarget target = searchResult.getSearchShardTarget(); + target.getShardId().writeTo(out); + new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchResult.getContextId()).writeTo(out); + } + if (allowNullContextId) { + for (var failure : shardFailures) { + failure.shard().getShardId().writeTo(out); + new SearchContextIdForNode(failure.shard().getClusterAlias(), null, null).writeTo(out); + } + } out.writeMap(aliasFilter, StreamOutput::writeWriteable); return out.bytes(); } catch (IOException e) { @@ -72,12 +91,6 @@ public static BytesReference encode( } } - private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { - final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); - target.getShardId().writeTo(out); - new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); - } - public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, BytesReference id) { try (var in = new NamedWriteableAwareStreamInput(id.streamInput(), namedWriteableRegistry)) { final TransportVersion version = TransportVersion.readVersion(in); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index 3071362f552ea..a70ddf6ee14b9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,25 +22,59 @@ public final class SearchContextIdForNode implements Writeable { private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + /** + * Contains the details required to retrieve a {@link ShardSearchContextId} for a shard on a specific node. + * + * @param clusterAlias The alias of the cluster, or {@code null} if the shard is local. + * @param node The target node where the search context ID is defined, or {@code null} if the shard is missing or unavailable. + * @param searchContextId The {@link ShardSearchContextId}, or {@code null} if the shard is missing or unavailable. 
+ */ + SearchContextIdForNode(@Nullable String clusterAlias, @Nullable String node, @Nullable ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; } SearchContextIdForNode(StreamInput in) throws IOException { - this.node = in.readString(); + boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + this.node = allowNull ? in.readOptionalString() : in.readString(); this.clusterAlias = in.readOptionalString(); - this.searchContextId = new ShardSearchContextId(in); + this.searchContextId = allowNull ? in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(node); + boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + if (allowNull) { + out.writeOptionalString(node); + } else { + if (node == null) { + // We should never set a null node if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null node value to a node in version " + + out.getTransportVersion() + + ". The target node must be specified to retrieve the ShardSearchContextId." + ); + } + out.writeString(node); + } out.writeOptionalString(clusterAlias); - searchContextId.writeTo(out); + if (allowNull) { + out.writeOptionalWriteable(searchContextId); + } else { + if (searchContextId == null) { + // We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null search context ID to a node in version " + + out.getTransportVersion() + + ". A valid search context ID is required to identify the shard's search context in this version." 
+ ); + } + searchContextId.writeTo(out); + } } + @Nullable public String getNode() { return node; } @@ -49,6 +84,7 @@ public String getClusterAlias() { return clusterAlias; } + @Nullable public ShardSearchContextId getSearchContextId() { return searchContextId; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index a929b774edf5e..717b1805547be 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; @@ -21,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +32,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -50,6 +54,8 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; +import static org.elasticsearch.core.Strings.format; + public class TransportOpenPointInTimeAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportOpenPointInTimeAction.class); @@ -62,6 +68,7 @@ public class TransportOpenPointInTimeAction extends HandledTransportAction listener) { + final ClusterState clusterState = clusterService.state(); + // Check if all the nodes in this cluster know about the service + if (request.allowPartialSearchResults() + && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + listener.onFailure( + new ElasticsearchStatusException( + format( + "The [allow_partial_search_results] parameter cannot be used while the cluster is still upgrading. " + + "Please wait until the upgrade is fully completed and try again." 
+ ), + RestStatus.BAD_REQUEST + ) + ); + return; + } final SearchRequest searchRequest = new SearchRequest().indices(request.indices()) .indicesOptions(request.indicesOptions()) .preference(request.preference()) .routing(request.routing()) - .allowPartialSearchResults(false) + .allowPartialSearchResults(request.allowPartialSearchResults()) .source(new SearchSourceBuilder().query(request.indexFilter())); searchRequest.setMaxConcurrentShardRequests(request.maxConcurrentShardRequests()); searchRequest.setCcsMinimizeRoundtrips(false); transportSearchAction.executeRequest((SearchTask) task, searchRequest, listener.map(r -> { assert r.pointInTimeId() != null : r; - return new OpenPointInTimeResponse(r.pointInTimeId()); + return new OpenPointInTimeResponse( + r.pointInTimeId(), + r.getTotalShards(), + r.getSuccessfulShards(), + r.getFailedShards(), + r.getSkippedShards() + ); }), searchListener -> new OpenPointInTimePhase(request, searchListener)); } @@ -215,7 +245,9 @@ SearchPhase openPointInTimePhase( ) { @Override protected String missingShardsErrorMessage(StringBuilder missingShards) { - return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + missingShards + "]"; + return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + + missingShards + + "]. Consider using `allow_partial_search_results` setting to bypass this error."; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 75668f5ebce51..11e767df9c010 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1116,11 +1116,16 @@ static List getRemoteShardsIteratorFromPointInTime( final String clusterAlias = entry.getKey(); assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias(); final List targetNodes = new ArrayList<>(group.allocatedNodes().size()); - targetNodes.add(perNode.getNode()); - if (perNode.getSearchContextId().getSearcherId() != null) { - for (String node : group.allocatedNodes()) { - if (node.equals(perNode.getNode()) == false) { - targetNodes.add(node); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + targetNodes.add(perNode.getNode()); + if (perNode.getSearchContextId().getSearcherId() != null) { + for (String node : group.allocatedNodes()) { + if (node.equals(perNode.getNode()) == false) { + targetNodes.add(node); + } } } } @@ -1216,7 +1221,7 @@ private void executeSearch( assert searchRequest.pointInTimeBuilder() != null; aliasFilter = resolvedIndices.getSearchContextId().aliasFilter(); concreteLocalIndices = resolvedIndices.getLocalIndices() == null ? 
new String[0] : resolvedIndices.getLocalIndices().indices(); - localShardIterators = getLocalLocalShardsIteratorFromPointInTime( + localShardIterators = getLocalShardsIteratorFromPointInTime( clusterState, searchRequest.indicesOptions(), searchRequest.getLocalClusterAlias(), @@ -1723,7 +1728,7 @@ private static RemoteTransportException wrapRemoteClusterFailure(String clusterA return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); } - static List getLocalLocalShardsIteratorFromPointInTime( + static List getLocalShardsIteratorFromPointInTime( ClusterState clusterState, IndicesOptions indicesOptions, String localClusterAlias, @@ -1737,25 +1742,30 @@ static List getLocalLocalShardsIteratorFromPointInTime( if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); final List targetNodes = new ArrayList<>(2); - try { - final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); - // Prefer executing shard requests on nodes that are part of PIT first. - if (clusterState.nodes().nodeExists(perNode.getNode())) { - targetNodes.add(perNode.getNode()); - } - if (perNode.getSearchContextId().getSearcherId() != null) { - for (ShardRouting shard : shards) { - if (shard.currentNodeId().equals(perNode.getNode()) == false) { - targetNodes.add(shard.currentNodeId()); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + try { + final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + // Prefer executing shard requests on nodes that are part of PIT first. + if (clusterState.nodes().nodeExists(perNode.getNode())) { + targetNodes.add(perNode.getNode()); + } + if (perNode.getSearchContextId().getSearcherId() != null) { + for (ShardRouting shard : shards) { + if (shard.currentNodeId().equals(perNode.getNode()) == false) { + targetNodes.add(shard.currentNodeId()); + } } } - } - } catch (IndexNotFoundException | ShardNotFoundException e) { - // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on - // this coordinating node is outdated. It's fine to ignore these extra "retry-able" target shards - // when allowPartialSearchResults is false - if (allowPartialSearchResults == false) { - throw e; + } catch (IndexNotFoundException | ShardNotFoundException e) { + // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on + // this coordinating node is outdated. 
It's fine to ignore these extra "retry-able" target shards + // when allowPartialSearchResults is false + if (allowPartialSearchResults == false) { + throw e; + } } } OriginalIndices finalIndices = new OriginalIndices(new String[] { shardId.getIndexName() }, indicesOptions); diff --git a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java index dda977565af45..e7b5e898684f6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java @@ -31,7 +31,7 @@ public void testMaxConcurrentSearchRequests() { verifyingClient.setExecuteVerifier(((actionType, transportRequest) -> { assertThat(transportRequest, instanceOf(OpenPointInTimeRequest.class)); transportRequests.add((OpenPointInTimeRequest) transportRequest); - return new OpenPointInTimeResponse(new BytesArray("n/a")); + return new OpenPointInTimeResponse(new BytesArray("n/a"), 1, 1, 0, 0); })); { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 32157e09e628f..af7068152648f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -18,6 +19,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; @@ -52,40 +54,82 @@ public void testEncode() { final AtomicArray queryResults = TransportSearchHelperTests.generateQueryResults(); final TransportVersion version = TransportVersion.current(); final Map aliasFilters = new HashMap<>(); + Map shardSearchFailures = new HashMap<>(); + int idx = 0; for (SearchPhaseResult result : queryResults.asList()) { - final AliasFilter aliasFilter; if (randomBoolean()) { - aliasFilter = AliasFilter.of(randomQueryBuilder()); - } else if (randomBoolean()) { - aliasFilter = AliasFilter.of(randomQueryBuilder(), "alias-" + between(1, 10)); + shardSearchFailures.put( + result.getSearchShardTarget(), + new ShardSearchFailure( + new NoShardAvailableActionException(result.getSearchShardTarget().getShardId()), + result.getSearchShardTarget() + ) + ); + queryResults.set(idx, null); } else { - aliasFilter = AliasFilter.EMPTY; - } - if (randomBoolean()) { - aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = AliasFilter.of(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = AliasFilter.of(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if 
(randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } } + idx += 1; } - final BytesReference id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); + final BytesReference id = SearchContextId.encode( + queryResults.asList(), + aliasFilters, + version, + shardSearchFailures.values().toArray(ShardSearchFailure[]::new) + ); final SearchContextId context = SearchContextId.decode(namedWriteableRegistry, id); assertThat(context.shards().keySet(), hasSize(3)); + // TODO assertThat(context.failedShards().keySet(), hasSize(shardsFailed)); assertThat(context.aliasFilter(), equalTo(aliasFilters)); - SearchContextIdForNode node1 = context.shards().get(new ShardId("idx", "uuid1", 2)); + + ShardId shardIdForNode1 = new ShardId("idx", "uuid1", 2); + SearchShardTarget shardTargetForNode1 = new SearchShardTarget("node_1", shardIdForNode1, "cluster_x"); + SearchContextIdForNode node1 = context.shards().get(shardIdForNode1); assertThat(node1.getClusterAlias(), equalTo("cluster_x")); - assertThat(node1.getNode(), equalTo("node_1")); - assertThat(node1.getSearchContextId().getId(), equalTo(1L)); - assertThat(node1.getSearchContextId().getSessionId(), equalTo("a")); + if (shardSearchFailures.containsKey(shardTargetForNode1)) { + assertNull(node1.getNode()); + assertNull(node1.getSearchContextId()); + } else { + assertThat(node1.getNode(), equalTo("node_1")); + assertThat(node1.getSearchContextId().getId(), equalTo(1L)); + assertThat(node1.getSearchContextId().getSessionId(), equalTo("a")); + } - SearchContextIdForNode node2 = context.shards().get(new ShardId("idy", "uuid2", 42)); + ShardId shardIdForNode2 = new ShardId("idy", "uuid2", 42); + SearchShardTarget shardTargetForNode2 = new SearchShardTarget("node_2", shardIdForNode2, "cluster_y"); + SearchContextIdForNode node2 = context.shards().get(shardIdForNode2); assertThat(node2.getClusterAlias(), equalTo("cluster_y")); - assertThat(node2.getNode(), equalTo("node_2")); - assertThat(node2.getSearchContextId().getId(), equalTo(12L)); - assertThat(node2.getSearchContextId().getSessionId(), equalTo("b")); + if (shardSearchFailures.containsKey(shardTargetForNode2)) { + assertNull(node2.getNode()); + assertNull(node2.getSearchContextId()); + } else { + assertThat(node2.getNode(), equalTo("node_2")); + assertThat(node2.getSearchContextId().getId(), equalTo(12L)); + assertThat(node2.getSearchContextId().getSessionId(), equalTo("b")); + } - SearchContextIdForNode node3 = context.shards().get(new ShardId("idy", "uuid2", 43)); + ShardId shardIdForNode3 = new ShardId("idy", "uuid2", 43); + SearchShardTarget shardTargetForNode3 = new SearchShardTarget("node_3", shardIdForNode3, null); + SearchContextIdForNode node3 = context.shards().get(shardIdForNode3); assertThat(node3.getClusterAlias(), nullValue()); - assertThat(node3.getNode(), equalTo("node_3")); - assertThat(node3.getSearchContextId().getId(), equalTo(42L)); - assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + if (shardSearchFailures.containsKey(shardTargetForNode3)) { + assertNull(node3.getNode()); + assertNull(node3.getSearchContextId()); + } else { + assertThat(node3.getNode(), equalTo("node_3")); + assertThat(node3.getSearchContextId().getId(), equalTo(42L)); + assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + } final String[] indices = SearchContextId.decodeIndices(id); assertThat(indices.length, equalTo(3)); diff --git 
a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index edd253e945a9b..6621f2055968f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1646,7 +1646,7 @@ public void testLocalShardIteratorFromPointInTime() { } TimeValue keepAlive = randomBoolean() ? null : TimeValue.timeValueSeconds(between(30, 3600)); - final List shardIterators = TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + final List shardIterators = TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, @@ -1691,7 +1691,7 @@ public void testLocalShardIteratorFromPointInTime() { ) ); IndexNotFoundException error = expectThrows(IndexNotFoundException.class, () -> { - TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, @@ -1702,7 +1702,7 @@ public void testLocalShardIteratorFromPointInTime() { }); assertThat(error.getIndex().getName(), equalTo("another-index")); // Ok when some indices don't exist and `allowPartialSearchResults` is true. - Optional anotherShardIterator = TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + Optional anotherShardIterator = TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index cf469546b6f63..4bdbc81bcc3f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -956,6 +956,13 @@ public ClusterHealthStatus ensureYellow(String... indices) { return ensureColor(ClusterHealthStatus.YELLOW, TimeValue.timeValueSeconds(30), false, indices); } + /** + * Ensures the cluster has a red state via the cluster health API. + */ + public ClusterHealthStatus ensureRed(String... 
indices) { + return ensureColor(ClusterHealthStatus.RED, TimeValue.timeValueSeconds(30), false, indices); + } + /** * Ensures the cluster has a yellow state via the cluster health API and ensures the that cluster has no initializing shards * for the given indices diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 1652495197fc0..9cd6549b4be2c 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -209,7 +209,7 @@ protected void ActionListener listener ) { if (request instanceof OpenPointInTimeRequest) { - OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0); listener.onResponse((Response) response); } else if (request instanceof ClosePointInTimeRequest) { ClosePointInTimeResponse response = new ClosePointInTimeResponse(true, 1); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index c0e5d398d6508..f1c5d483d4002 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -204,7 +204,7 @@ protected void assertArrayEquals(INDICES, openPIT.indices()); // indices for opening pit should be the same as for the eql query itself openedPIT = true; - OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0); listener.onResponse((Response) response); } else if (request instanceof ClosePointInTimeRequest closePIT) { assertTrue(openedPIT); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index c001b312d5578..ecf5ef61ac49a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -394,7 +394,7 @@ protected void ) { if (request instanceof OpenPointInTimeRequest) { pitContextCounter.incrementAndGet(); - OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0); listener.onResponse((Response) response); } else if (request instanceof ClosePointInTimeRequest) { ClosePointInTimeResponse response = new ClosePointInTimeResponse(true, 1); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 5f878480a7d0d..3be0a17d19253 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -66,6 +66,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportMultiSearchAction; @@ -3650,7 +3651,7 @@ private static BytesReference createEncodedPIT(Index index) { ); List results = new ArrayList<>(); results.add(testSearchPhaseResult1); - return SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current()); + return SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current(), ShardSearchFailure.EMPTY_ARRAY); } private static class RBACAuthorizationInfoRoleMatcher implements ArgumentMatcher { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java index 10d6b04d7505c..3e1f910c9f72e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java @@ -190,7 +190,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti doAnswer(invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(new OpenPointInTimeResponse(pitId)); + listener.onResponse(new OpenPointInTimeResponse(pitId, 1, 1, 0, 0)); return null; }).when(client).execute(eq(TransportOpenPointInTimeAction.TYPE), any(), any()); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 062c951f67c96..c8677c2816fc9 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -538,7 +538,7 @@ protected void if (request instanceof OpenPointInTimeRequest) { if (pitSupported) { pitContextCounter.incrementAndGet(); - OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id")); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id"), 1, 1, 0, 0); listener.onResponse((Response) response); } else { listener.onFailure(new ActionNotFoundTransportException("_pit")); From 030f42576987f1a2c31398cfa1ee9ce3b27fb00d Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 26 Aug 2024 12:13:00 +0200 Subject: [PATCH 198/389] Make CompatibilityVersions.minimumVersions cheaper (#112186) This is a significant portion of CS updates at the moment. We should look into avoiding the computation here altogether unless the nodes change, but until then this is a trivial ~5x speedup that saves loads of cluster state thread time, especially in integration tests.
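For a rough picture of the rewrite, here is a minimal, self-contained sketch (a hypothetical `Versioned` record, not the actual ES classes) contrasting the stream pipeline with the single pass; the loop avoids allocating the stream and the `OptionalInt` on this hot path:

import java.util.Collection;
import java.util.List;

record Versioned(int version) {}

class MinVersionDemo {

    // Stream variant: builds a pipeline and an OptionalInt on every call.
    static int minViaStream(Collection<Versioned> versions) {
        return versions.stream().mapToInt(Versioned::version).min().orElse(Integer.MIN_VALUE);
    }

    // Loop variant: one pass over the collection, no intermediate allocations.
    static int minViaLoop(Collection<Versioned> versions) {
        if (versions.isEmpty()) {
            return Integer.MIN_VALUE; // conservative default, mirroring orElse above
        }
        int min = Integer.MAX_VALUE;
        for (Versioned v : versions) {
            min = Math.min(min, v.version());
        }
        return min;
    }

    public static void main(String[] args) {
        List<Versioned> versions = List.of(new Versioned(3), new Versioned(1), new Versioned(2));
        System.out.println(minViaStream(versions)); // prints 1
        System.out.println(minViaLoop(versions));   // prints 1
    }
}

The actual change below additionally folds the per-key minimums of the system index mappings versions into the same pass via Map.merge, so the input collection is only iterated once.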
--- .../version/CompatibilityVersions.java | 37 +++++++++++-------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java index a92cf1ce2e42c..c1489afc6c369 100644 --- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -19,12 +19,9 @@ import java.io.IOException; import java.util.Collection; -import java.util.Comparator; import java.util.HashMap; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Wraps component version numbers for cluster state @@ -42,6 +39,8 @@ public record CompatibilityVersions( Map systemIndexMappingsVersion ) implements Writeable, ToXContentFragment { + public static final CompatibilityVersions EMPTY = new CompatibilityVersions(TransportVersions.MINIMUM_COMPATIBLE, Map.of()); + /** * Constructs a VersionWrapper collecting all the minimum versions from the values of the map. * @@ -49,18 +48,26 @@ public record CompatibilityVersions( * @return Minimum versions for the cluster */ public static CompatibilityVersions minimumVersions(Collection compatibilityVersions) { - TransportVersion minimumTransport = compatibilityVersions.stream() - .map(CompatibilityVersions::transportVersion) - .min(Comparator.naturalOrder()) - // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway: - .orElse(TransportVersions.MINIMUM_COMPATIBLE); - - Map minimumMappingsVersions = compatibilityVersions.stream() - .flatMap(mv -> mv.systemIndexMappingsVersion().entrySet().stream()) - .collect( - Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> Stream.of(v1, v2).min(Comparator.naturalOrder()).get()) - ); - + if (compatibilityVersions.isEmpty()) { + return EMPTY; + } + TransportVersion minimumTransport = null; + Map minimumMappingsVersions = null; + for (CompatibilityVersions cv : compatibilityVersions) { + TransportVersion version = cv.transportVersion(); + if (minimumTransport == null) { + minimumTransport = version; + minimumMappingsVersions = new HashMap<>(cv.systemIndexMappingsVersion()); + continue; + } + if (version.compareTo(minimumTransport) < 0) { + minimumTransport = version; + } + for (Map.Entry entry : cv.systemIndexMappingsVersion().entrySet()) { + minimumMappingsVersions.merge(entry.getKey(), entry.getValue(), (v1, v2) -> v1.compareTo(v2) < 0 ? v1 : v2); + } + } + // transportVersions is always non-null since we break out on empty above return new CompatibilityVersions(minimumTransport, minimumMappingsVersions); } From 48c32f133e4111b65b3d5f0548051a224d4d9b71 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 26 Aug 2024 12:14:11 +0200 Subject: [PATCH 199/389] Speedup string interning in `ClusterName` (#112045) If we want to intern here, we should use the deduplicator to speed things up. 
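To illustrate the idea, here is a minimal sketch of a map-based deduplicator (a hypothetical demo class, not the actual implementation behind `Settings.internKeyOrValue`): a canonical instance kept in a plain ConcurrentHashMap on the Java heap is typically much cheaper to look up than `String.intern()`, which goes through the JVM's native string table on every call.

import java.util.concurrent.ConcurrentHashMap;

class DeduplicatorDemo {

    private static final ConcurrentHashMap<String, String> CANONICAL = new ConcurrentHashMap<>();

    // Returns one canonical instance per distinct string value.
    static String deduplicate(String s) {
        String existing = CANONICAL.putIfAbsent(s, s);
        return existing == null ? s : existing;
    }

    public static void main(String[] args) {
        // new String(...) forces two distinct instances with equal contents
        String a = deduplicate(new String("my-cluster"));
        String b = deduplicate(new String("my-cluster"));
        System.out.println(a == b); // prints true: same canonical instance
    }
}

A production deduplicator would also need to bound the map's size; the sketch leaves that out for brevity.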
--- .../src/main/java/org/elasticsearch/cluster/ClusterName.java | 3 ++- .../main/java/org/elasticsearch/common/settings/Settings.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java index 711c2a7fee8e0..dd4194b60e6ac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -39,7 +39,8 @@ public ClusterName(StreamInput input) throws IOException { } public ClusterName(String value) { - this.value = value.intern(); + // cluster name string is most likely part of a setting so we can speed things up over outright interning here + this.value = Settings.internKeyOrValue(value); } public String value() { diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index be8292f02bb59..1df7b27304fd0 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -1567,7 +1567,7 @@ private static String toString(Object o) { * @param s string to intern * @return interned string */ - static String internKeyOrValue(String s) { + public static String internKeyOrValue(String s) { return settingLiteralDeduplicator.deduplicate(s); } From 8431c3645406e56fe6b1147732783652b699bceb Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 26 Aug 2024 12:54:18 +0200 Subject: [PATCH 200/389] Support docvalues only query in shape field (#112199) --- docs/changelog/112199.yaml | 5 ++ .../search/ShapeQueryOverShapeTests.java | 8 +-- .../index/mapper/ShapeFieldMapper.java | 15 +++++- .../index/query/ShapeQueryProcessor.java | 49 +++++-------------- .../spatial/ingest/CircleProcessorTests.java | 10 +--- 5 files changed, 38 insertions(+), 49 deletions(-) create mode 100644 docs/changelog/112199.yaml diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml new file mode 100644 index 0000000000000..eb22f215f9828 --- /dev/null +++ b/docs/changelog/112199.yaml @@ -0,0 +1,5 @@ +pr: 112199 +summary: Support docvalues only query in shape field +area: Geo +type: enhancement +issues: [] diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java index 554c9ff2904dc..1c013aba52261 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java @@ -51,16 +51,18 @@ public class ShapeQueryOverShapeTests extends ShapeQueryTestCase { @Override protected XContentBuilder createDefaultMapping() throws Exception { - XContentBuilder xcb = XContentFactory.jsonBuilder() + final boolean isIndexed = randomBoolean(); + final boolean hasDocValues = isIndexed == false || randomBoolean(); + return XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject(defaultFieldName) .field("type", "shape") + .field("index", isIndexed) + .field("doc_values", hasDocValues) .endObject() .endObject() .endObject(); - - return xcb; } @Override diff --git 
a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index 91a118f964064..ab57efee527dc 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; @@ -162,7 +163,19 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - return queryProcessor.shapeQuery(shape, fieldName, relation, context, hasDocValues()); + failIfNotIndexedNorDocValuesFallback(context); + // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { + throw new QueryShardException( + context, + ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]." + ); + } + try { + return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + } catch (IllegalArgumentException e) { + throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); + } } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java index cd09b74e99591..25a0e55c027f5 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java @@ -14,51 +14,26 @@ import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; -import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper; public class ShapeQueryProcessor { - public Query shapeQuery( - Geometry geometry, - String fieldName, - ShapeRelation relation, - SearchExecutionContext context, - boolean hasDocValues - ) { - validateIsShapeFieldType(fieldName, context); - // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { - throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); - } + public Query 
shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { + assert indexed || hasDocValues; if (geometry == null || geometry.isEmpty()) { return new MatchNoDocsQuery(); } - final XYGeometry[] luceneGeometries; - try { - luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - } catch (IllegalArgumentException e) { - throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); - } - Query query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + Query query; + if (indexed) { + query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + if (hasDocValues) { + final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + query = new IndexOrDocValuesQuery(query, queryDocValues); + } + } else { + query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); } return query; } - - private void validateIsShapeFieldType(String fieldName, SearchExecutionContext context) { - MappedFieldType fieldType = context.getFieldType(fieldName); - if (fieldType instanceof ShapeFieldMapper.ShapeFieldType == false) { - throw new QueryShardException( - context, - "Expected " + ShapeFieldMapper.CONTENT_TYPE + " field type for Field [" + fieldName + "] but found " + fieldType.typeName() - ); - } - } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java index 20b1a906b1dab..e71b4f0f4e981 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.GeoShapeIndexer; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.RandomDocumentPicks; @@ -39,7 +38,6 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper.GeoShapeWithDocValuesFieldType; -import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper.ShapeFieldType; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import java.io.IOException; @@ -244,17 +242,13 @@ public void testShapeQuery() throws IOException { int numSides = randomIntBetween(4, 1000); Geometry geometry = CircleUtils.createRegularShapePolygon(circle, numSides); - MappedFieldType shapeType = new ShapeFieldType(fieldName, true, true, Orientation.RIGHT, null, Collections.emptyMap()); - ShapeQueryProcessor processor = new ShapeQueryProcessor(); - SearchExecutionContext mockedContext = mock(SearchExecutionContext.class); - 
when(mockedContext.getFieldType(any())).thenReturn(shapeType); - Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, mockedContext, true); + Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); Query centerPointQuery = processor.shapeQuery( new Point(circle.getLon(), circle.getLat()), fieldName, ShapeRelation.INTERSECTS, - mockedContext, + true, true ); From 554eb4f693fd6934103fd3cf1e580f088cc8e064 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 13:00:18 +0200 Subject: [PATCH 201/389] ES|QL: better validation of GROK patterns (#112200) Catch exceptions when building GROK with a wrong pattern, and emit a client exception with a meaningful error message. Fixes https://github.com/elastic/elasticsearch/issues/112111 --- docs/changelog/112200.yaml | 6 ++++++ .../xpack/esql/parser/LogicalPlanBuilder.java | 8 +++++++- .../xpack/esql/parser/StatementParserTests.java | 5 +++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112200.yaml diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml new file mode 100644 index 0000000000000..0c2c3d71e3ddf --- /dev/null +++ b/docs/changelog/112200.yaml @@ -0,0 +1,6 @@ +pr: 112200 +summary: "ES|QL: better validation of GROK patterns" +area: ES|QL +type: bug +issues: + - 112111 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 0c4272a05a44e..ffd2375a688ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -57,6 +57,7 @@ import org.elasticsearch.xpack.esql.plan.logical.meta.MetaFunctions; import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.joni.exception.SyntaxException; import java.util.ArrayList; import java.util.Arrays; @@ -153,7 +154,12 @@ public PlanFactory visitGrokCommand(EsqlBaseParser.GrokCommandContext ctx) { return p -> { Source source = source(ctx); String pattern = visitString(ctx.string()).fold().toString(); - Grok.Parser grokParser = Grok.pattern(source, pattern); + Grok.Parser grokParser; + try { + grokParser = Grok.pattern(source, pattern); + } catch (SyntaxException e) { + throw new ParsingException(source, "Invalid grok pattern [{}]: [{}]", pattern, e.getMessage()); + } validateGrokPattern(source, grokParser, pattern); Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), grokParser); return result; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 3860088bf130c..a5ef7900a1a78 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -791,6 +791,11 @@ public void testGrokPattern() { "line 1:22: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + " the attribute [foo] is defined multiple times with different types" ); + + expectError( + "row a = \"foo\" | GROK a \"(?P.+)\"", + "line 1:18: Invalid grok pattern [(?P.+)]: [undefined group option]" + ); } public void 
testLikeRLike() { From de73cda9c39ad45c88535e9310f6ecaa3883b3f0 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 13:02:38 +0200 Subject: [PATCH 202/389] SQL: make date format functions more strict (#112140) --- .../src/main/resources/datetime.csv-spec | 18 +++++ .../scalar/datetime/DateFormatter.java | 27 ++++--- .../datetime/DateTimeFormatProcessor.java | 4 +- .../datetime/NamedDateTimeProcessor.java | 4 +- .../scalar/datetime/ToCharFormatter.java | 76 ++++++++++--------- 5 files changed, 79 insertions(+), 50 deletions(-) diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec index 90280676ff6d4..a67ab25bcf66a 100644 --- a/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec +++ b/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec @@ -1922,3 +1922,21 @@ SELECT hire_date FROM test_emp WHERE '2005-12-14T00:00:00.000Z'::datetime BETWEE ------------------------ ; + +// checking regressions after https://github.com/elastic/elasticsearch/pull/110222 +selectDateFunctionsCldr +schema::month:s|day_of_week:s|day:s|week:s|ad:s|day_of_week2:s|month2:s|ad2:s +SELECT DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%M') AS month, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%W') AS day_of_week, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%w') AS day, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%v') AS week, +DATETIME_FORMAT('2020-04-05T11:22:33.123Z'::date, 'G') AS ad, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'Day') AS day_of_week2, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'Month') AS month2, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'BC') AS ad2; + + month | day_of_week | day | week | ad | day_of_week2 | month2 | ad2 +--------------+-----------------+---------+---------+--------+----------------+--------------+------------ + April | Sunday | 0 | 14 | AD | "Sunday " | "April " | AD +; + diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java index 7016d6cb49e46..68f9acd165888 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java @@ -42,7 +42,7 @@ class DateFormatter { new Builder().pattern("%e").javaPattern("d").build(), new Builder().pattern("%f") .javaPattern("n") - .additionalMapper(s -> String.format(Locale.ROOT, "%06d", Math.round(Integer.parseInt(s) / 1000.0))) + .additionalMapper(s -> String.format(Locale.ENGLISH, "%06d", Math.round(Integer.parseInt(s) / 1000.0))) .build(), new Builder().pattern("%H").javaPattern("HH").build(), new Builder().pattern("%h").javaPattern("hh").build(), @@ -59,19 +59,28 @@ class DateFormatter { new Builder().pattern("%s").javaPattern("ss").build(), new Builder().pattern("%T").javaPattern("HH:mm:ss").build(), new Builder().pattern("%U") - .javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfYear()))) .build(), - new Builder().pattern("%u").javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.ISO.weekOfYear()))).build(), + new Builder().pattern("%u") + .javaFormat(t -> 
String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfYear()))) + .build(), + new Builder().pattern("%V") - .javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfWeekBasedYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfWeekBasedYear()))) + .build(), + new Builder().pattern("%v") + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfWeekBasedYear()))) .build(), - new Builder().pattern("%v").javaPattern("ww").build(), new Builder().pattern("%W").javaPattern("EEEE").build(), - new Builder().pattern("%w").javaPattern("e").additionalMapper(s -> Integer.parseInt(s) == 7 ? String.valueOf(0) : s).build(), + new Builder().pattern("%w") + .javaFormat(t -> String.format(Locale.ENGLISH, "%01d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).dayOfWeek()) - 1)) + .build(), new Builder().pattern("%X") - .javaFormat(t -> String.format(Locale.ROOT, "%04d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekBasedYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekBasedYear()))) + .build(), + new Builder().pattern("%x") + .javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.MONDAY, 7).weekBasedYear()))) .build(), - new Builder().pattern("%x").javaPattern("Y").build(), new Builder().pattern("%Y").javaPattern("yyyy").build(), new Builder().pattern("%y").javaPattern("yy").build() ); @@ -162,7 +171,7 @@ private Builder pattern(String pattern) { } private Builder javaPattern(String javaPattern) { - this.javaFormat = temporalAccessor -> DateTimeFormatter.ofPattern(javaPattern, Locale.ROOT).format(temporalAccessor); + this.javaFormat = temporalAccessor -> DateTimeFormatter.ofPattern(javaPattern, Locale.ENGLISH).format(temporalAccessor); return this; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java index 14ae0d3d1ed16..422e23f064cbf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java @@ -85,7 +85,7 @@ protected Function formatterFor(String pattern) { return null; } final String javaPattern = msToJavaPattern(pattern); - return DateTimeFormatter.ofPattern(javaPattern, Locale.ROOT)::format; + return DateTimeFormatter.ofPattern(javaPattern, Locale.ENGLISH)::format; } }, DATE_FORMAT { @@ -97,7 +97,7 @@ protected Function formatterFor(String pattern) { DATE_TIME_FORMAT { @Override protected Function formatterFor(String pattern) { - return DateTimeFormatter.ofPattern(pattern, Locale.ROOT)::format; + return DateTimeFormatter.ofPattern(pattern, Locale.ENGLISH)::format; } }, TO_CHAR { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java index 1d2e2735d86e3..c62f4303d93d6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -40,8 +40,8 @@ public final String extract(ZonedDateTime millis, String tzId) { } public static final String NAME = "ndt"; - private static final DateTimeFormatter DAY_NAME_FORMATTER = DateTimeFormatter.ofPattern("EEEE", Locale.ROOT); - private static final DateTimeFormatter MONTH_NAME_FORMATTER = DateTimeFormatter.ofPattern("MMMM", Locale.ROOT); + private static final DateTimeFormatter DAY_NAME_FORMATTER = DateTimeFormatter.ofPattern("EEEE", Locale.ENGLISH); + private static final DateTimeFormatter MONTH_NAME_FORMATTER = DateTimeFormatter.ofPattern("MMMM", Locale.ENGLISH); private final NameExtractor extractor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java index 6e7b0fcb47a06..4c9e851f54e36 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java @@ -41,7 +41,7 @@ class ToCharFormatter { of("HH12").formatFn("hh").numeric(), of("HH24").formatFn("HH").numeric(), of("MI").formatFn("mm").numeric(), - of("SS").formatFn("s", x -> String.format(Locale.ROOT, "%02d", parseInt(x))).numeric(), + of("SS").formatFn("s", x -> String.format(Locale.ENGLISH, "%02d", parseInt(x))).numeric(), of("MS").formatFn("n", nano -> firstDigitsOfNanos(nano, 3)).numericWithLeadingZeros(), of("US").formatFn("n", nano -> firstDigitsOfNanos(nano, 6)).numericWithLeadingZeros(), of("FF1").formatFn("n", nano -> firstDigitsOfNanos(nano, 1)).numericWithLeadingZeros(), @@ -52,14 +52,14 @@ class ToCharFormatter { of("FF6").formatFn("n", nano -> firstDigitsOfNanos(nano, 6)).numericWithLeadingZeros(), of("SSSSS").formatFn("A", milliSecondOfDay -> String.valueOf(parseInt(milliSecondOfDay) / 1000)).numeric(), of("SSSS").formatFn("A", milliSecondOfDay -> String.valueOf(parseInt(milliSecondOfDay) / 1000)).numeric(), - of("AM").formatFn("a", x -> x.toUpperCase(Locale.ROOT)).text(), - of("am").formatFn("a", x -> x.toLowerCase(Locale.ROOT)).text(), - of("PM").formatFn("a", x -> x.toUpperCase(Locale.ROOT)).text(), - of("pm").formatFn("a", x -> x.toLowerCase(Locale.ROOT)).text(), + of("AM").formatFn("a", x -> x.toUpperCase(Locale.ENGLISH)).text(), + of("am").formatFn("a", x -> x.toLowerCase(Locale.ENGLISH)).text(), + of("PM").formatFn("a", x -> x.toUpperCase(Locale.ENGLISH)).text(), + of("pm").formatFn("a", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("A.M.").formatFn("a", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("a.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("a.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("P.M.").formatFn("a", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("p.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("p.m.").formatFn("a", x -> (x.charAt(0) + "." 
+ x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("Y,YYY").formatFn("yyyy", year -> year.charAt(0) + "," + year.substring(1)).numericWithLeadingZeros(), of("YYYY").formatFn("yyyy").numeric(), of("YYY").formatFn("yyyy", year -> year.substring(1)).numeric(), @@ -70,51 +70,53 @@ class ToCharFormatter { of("IY").formatFn(t -> lastNCharacter(absoluteWeekBasedYear(t), 2)).numeric(), of("I").formatFn(t -> lastNCharacter(absoluteWeekBasedYear(t), 1)).numeric(), of("BC").formatFn("G").text(), - of("bc").formatFn("G", x -> x.toLowerCase(Locale.ROOT)).text(), + of("bc").formatFn("G", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("AD").formatFn("G").text(), - of("ad").formatFn("G", x -> x.toLowerCase(Locale.ROOT)).text(), + of("ad").formatFn("G", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("B.C.").formatFn("G", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("b.c.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("b.c.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("A.D.").formatFn("G", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("a.d.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), - of("MONTH").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x.toUpperCase(Locale.ROOT))).text(), - of("Month").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x)).text(), - of("month").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x.toLowerCase(Locale.ROOT))).text(), - of("MON").formatFn("MMM", x -> x.toUpperCase(Locale.ROOT)).text(), + of("a.d.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), + of("MONTH").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x.toUpperCase(Locale.ENGLISH))).text(), + of("Month").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x)).text(), + of("month").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x.toLowerCase(Locale.ENGLISH))).text(), + of("MON").formatFn("MMM", x -> x.toUpperCase(Locale.ENGLISH)).text(), of("Mon").formatFn("MMM").text(), - of("mon").formatFn("MMM", x -> x.toLowerCase(Locale.ROOT)).text(), + of("mon").formatFn("MMM", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("MM").formatFn("MM").numeric(), - of("DAY").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x.toUpperCase(Locale.ROOT))).text(), - of("Day").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x)).text(), - of("day").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x.toLowerCase(Locale.ROOT))).text(), - of("DY").formatFn("E", x -> x.toUpperCase(Locale.ROOT)).text(), + of("DAY").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x.toUpperCase(Locale.ENGLISH))).text(), + of("Day").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x)).text(), + of("day").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x.toLowerCase(Locale.ENGLISH))).text(), + of("DY").formatFn("E", x -> x.toUpperCase(Locale.ENGLISH)).text(), of("Dy").formatFn("E").text(), - of("dy").formatFn("E", x -> x.toLowerCase(Locale.ROOT)).text(), + of("dy").formatFn("E", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("DDD").formatFn("DDD").numeric(), of("IDDD").formatFn( t -> String.format( - Locale.ROOT, + Locale.ENGLISH, "%03d", (t.get(WeekFields.ISO.weekOfWeekBasedYear()) - 1) * 7 + t.get(ChronoField.DAY_OF_WEEK) ) ).numeric(), - of("DD").formatFn("d", x -> String.format(Locale.ROOT, "%02d", 
parseInt(x))).numeric(), + of("DD").formatFn("d", x -> String.format(Locale.ENGLISH, "%02d", parseInt(x))).numeric(), of("ID").formatFn(t -> String.valueOf(t.get(ChronoField.DAY_OF_WEEK))).numeric(), of("D").formatFn(t -> String.valueOf(t.get(WeekFields.SUNDAY_START.dayOfWeek()))).numeric(), of("W").formatFn(t -> String.valueOf(t.get(ChronoField.ALIGNED_WEEK_OF_MONTH))).numeric(), - of("WW").formatFn(t -> String.format(Locale.ROOT, "%02d", t.get(ChronoField.ALIGNED_WEEK_OF_YEAR))).numeric(), - of("IW").formatFn(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.ISO.weekOfWeekBasedYear()))).numeric(), + of("WW").formatFn(t -> String.format(Locale.ENGLISH, "%02d", t.get(ChronoField.ALIGNED_WEEK_OF_YEAR))).numeric(), + of("IW").formatFn(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.ISO.weekOfWeekBasedYear()))).numeric(), of("CC").formatFn(t -> { int century = yearToCentury(t.get(ChronoField.YEAR)); - return String.format(Locale.ROOT, century < 0 ? "%03d" : "%02d", century); + return String.format(Locale.ENGLISH, century < 0 ? "%03d" : "%02d", century); }).numeric(), of("J").formatFn(t -> String.valueOf(t.getLong(JulianFields.JULIAN_DAY))).numeric(), of("Q").formatFn("Q").numeric(), - of("RM").formatFn("MM", month -> String.format(Locale.ROOT, "%-4s", monthToRoman(parseInt(month)))).text(), - of("rm").formatFn("MM", month -> String.format(Locale.ROOT, "%-4s", monthToRoman(parseInt(month)).toLowerCase(Locale.ROOT))) - .text(), + of("RM").formatFn("MM", month -> String.format(Locale.ENGLISH, "%-4s", monthToRoman(parseInt(month)))).text(), + of("rm").formatFn( + "MM", + month -> String.format(Locale.ENGLISH, "%-4s", monthToRoman(parseInt(month)).toLowerCase(Locale.ENGLISH)) + ).text(), of("TZ").formatFn(ToCharFormatter::zoneAbbreviationOf).text(), - of("tz").formatFn(t -> zoneAbbreviationOf(t).toLowerCase(Locale.ROOT)).text(), + of("tz").formatFn(t -> zoneAbbreviationOf(t).toLowerCase(Locale.ENGLISH)).text(), of("TZH").acceptsLowercase(false).formatFn("ZZ", s -> s.substring(0, 3)).text(), of("TZM").acceptsLowercase(false).formatFn("ZZ", s -> lastNCharacter(s, 2)).text(), of("OF").acceptsLowercase(false).formatFn("ZZZZZ", ToCharFormatter::formatOffset).offset() @@ -127,7 +129,7 @@ class ToCharFormatter { // also index the lower case version of the patterns if accepted for (ToCharFormatter formatter : formatters) { if (formatter.acceptsLowercase) { - formatterMap.putIfAbsent(formatter.pattern.toLowerCase(Locale.ROOT), formatter); + formatterMap.putIfAbsent(formatter.pattern.toLowerCase(Locale.ENGLISH), formatter); } } FORMATTER_MAP = formatterMap; @@ -274,8 +276,8 @@ private static String appendOrdinalSuffix(String defaultSuffix, String s) { // the Y,YYY pattern might can cause problems with the parsing, but thankfully the last 3 // characters is enough to calculate the suffix int i = parseInt(lastNCharacter(s, 3)); - final boolean upperCase = defaultSuffix.equals(defaultSuffix.toUpperCase(Locale.ROOT)); - return s + (upperCase ? ordinalSuffix(i).toUpperCase(Locale.ROOT) : ordinalSuffix(i)); + final boolean upperCase = defaultSuffix.equals(defaultSuffix.toUpperCase(Locale.ENGLISH)); + return s + (upperCase ? ordinalSuffix(i).toUpperCase(Locale.ENGLISH) : ordinalSuffix(i)); } catch (NumberFormatException ex) { return s + defaultSuffix; } @@ -312,11 +314,11 @@ private static String removeLeadingZerosFromOffset(String offset) { private static String absoluteWeekBasedYear(TemporalAccessor t) { int year = t.get(IsoFields.WEEK_BASED_YEAR); year = year > 0 ? 
year : -(year - 1); - return String.format(Locale.ROOT, "%04d", year); + return String.format(Locale.ENGLISH, "%04d", year); } private static String firstDigitsOfNanos(String nano, int digits) { - return String.format(Locale.ROOT, "%09d", parseInt(nano)).substring(0, digits); + return String.format(Locale.ENGLISH, "%09d", parseInt(nano)).substring(0, digits); } private static String lastNCharacter(String s, int n) { @@ -324,7 +326,7 @@ private static String lastNCharacter(String s, int n) { } private static String zoneAbbreviationOf(TemporalAccessor temporalAccessor) { - String zone = ZoneId.from(temporalAccessor).getDisplayName(TextStyle.SHORT, Locale.ROOT); + String zone = ZoneId.from(temporalAccessor).getDisplayName(TextStyle.SHORT, Locale.ENGLISH); return "Z".equals(zone) ? "UTC" : zone; } @@ -344,7 +346,7 @@ public Builder formatFn(final String javaPattern) { public Builder formatFn(final String javaPattern, final Function additionalMapper) { this.formatFn = temporalAccessor -> { - String formatted = DateTimeFormatter.ofPattern(javaPattern != null ? javaPattern : "'" + pattern + "'", Locale.ROOT) + String formatted = DateTimeFormatter.ofPattern(javaPattern != null ? javaPattern : "'" + pattern + "'", Locale.ENGLISH) .format(temporalAccessor); return additionalMapper == null ? formatted : additionalMapper.apply(formatted); }; From b685a436ce462c874d88c9ca22a07b37ec5187d3 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Mon, 26 Aug 2024 15:18:47 +0300 Subject: [PATCH 203/389] Adding RankDocsRetrieverBuilder and RankDocsQuery (#111709) --- .../retriever/RankDocRetrieverBuilderIT.java | 755 ++++++++++++++++++ .../search/retriever/RetrieverRewriteIT.java | 11 + server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 1 + .../elasticsearch/common/lucene/Lucene.java | 3 + .../uhighlight/CustomUnifiedHighlighter.java | 5 +- .../elasticsearch/search/SearchModule.java | 5 + .../search/builder/SearchSourceBuilder.java | 6 +- .../elasticsearch/search/rank/RankDoc.java | 49 +- .../search/rank/feature/RankFeatureDoc.java | 14 +- .../search/retriever/KnnRetrieverBuilder.java | 14 + .../retriever/RankDocsRetrieverBuilder.java | 146 ++++ .../search/retriever/RetrieverBuilder.java | 33 + .../retriever/StandardRetrieverBuilder.java | 11 + .../retriever/rankdoc/RankDocsQuery.java | 199 +++++ .../rankdoc/RankDocsQueryBuilder.java | 111 +++ .../rankdoc/RankDocsSortBuilder.java | 113 +++ .../retriever/rankdoc/RankDocsSortField.java | 101 +++ .../search/SearchPhaseControllerTests.java | 5 +- .../action/search/SearchRequestTests.java | 16 + .../search/SearchServiceTests.java | 48 +- .../search/query/QuerySearchResultTests.java | 6 +- .../search/rank/RankDocTests.java | 57 ++ .../KnnRetrieverBuilderParsingTests.java | 32 + .../RankDocsRetrieverBuilderTests.java | 165 ++++ .../StandardRetrieverBuilderParsingTests.java | 31 + .../rankdoc/RankDocsQueryBuilderTests.java | 120 +++ .../rankdoc/RankDocsSortBuilderTests.java | 71 ++ .../search/rank/TestRankDoc.java | 45 -- .../search/rank/TestRankShardResult.java | 6 +- .../retriever/TestRetrieverBuilder.java | 6 + .../random/RandomRankRetrieverBuilder.java | 8 +- .../TextSimilarityRankRetrieverBuilder.java | 62 +- ...xtSimilarityRankRetrieverBuilderTests.java | 145 +++- .../70_text_similarity_rank_retriever.yml | 1 + .../xpack/rank/rrf/RRFRankDoc.java | 34 + .../xpack/rank/rrf/RRFRetrieverBuilder.java | 6 + .../xpack/rank/rrf/RRFRankDocTests.java | 5 + 38 files changed, 2351 insertions(+), 96 deletions(-) create mode 100644 
server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java create mode 100644 server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java new file mode 100644 index 0000000000000..fa4cafc66c822 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java @@ -0,0 +1,755 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.equalTo; + +public class RankDocRetrieverBuilderIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MockSearchService.TestPlugin.class); + } + + public record RetrieverSource(RetrieverBuilder retriever, SearchSourceBuilder source) {} + + private static String INDEX = "test_index"; + private static final String ID_FIELD = "_id"; + private static final String DOC_FIELD = "doc"; + private static final String TEXT_FIELD = "text"; + private static final String VECTOR_FIELD = "vector"; + private static final String TOPIC_FIELD = "topic"; + private static final String LAST_30D_FIELD = "views.last30d"; + private static final String ALL_TIME_FIELD = "views.all"; + + @Before + public void setup() throws Exception { + String mapping = """ + { + "properties": { + "vector": { + "type": "dense_vector", + "dims": 3, + "element_type": "float", + "index": true, + "similarity": "l2_norm", + "index_options": { + "type": "hnsw" + } + }, + "text": { + "type": "text" + }, + "doc": { + "type": "keyword" + }, + "topic": { + "type": "keyword" + }, + "views": { + "type": "nested", + "properties": { + "last30d": { + "type": "integer" + }, + "all": { + "type": "integer" + } + } + } + } + } + """; + createIndex(INDEX, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).build()); + 
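// a single shard keeps scores and tie-breaking deterministic, so the ordered assertions in these tests are stable +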
admin().indices().preparePutMapping(INDEX).setSource(mapping, XContentType.JSON).get(); + indexDoc( + INDEX, + "doc_1", + DOC_FIELD, + "doc_1", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "the quick brown fox jumps over the lazy dog", + LAST_30D_FIELD, + 100 + ); + indexDoc( + INDEX, + "doc_2", + DOC_FIELD, + "doc_2", + TOPIC_FIELD, + "astronomy", + TEXT_FIELD, + "you know, for Search!", + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + LAST_30D_FIELD, + 3 + ); + indexDoc(INDEX, "doc_3", DOC_FIELD, "doc_3", TOPIC_FIELD, "technology", VECTOR_FIELD, new float[] { 6.0f, 6.0f, 6.0f }); + indexDoc( + INDEX, + "doc_4", + DOC_FIELD, + "doc_4", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "aardvark is a really awesome animal, but not very quick", + ALL_TIME_FIELD, + 100, + LAST_30D_FIELD, + 40 + ); + indexDoc(INDEX, "doc_5", DOC_FIELD, "doc_5", TOPIC_FIELD, "science", TEXT_FIELD, "irrelevant stuff"); + indexDoc( + INDEX, + "doc_6", + DOC_FIELD, + "doc_6", + TEXT_FIELD, + "quick quick quick quick search", + VECTOR_FIELD, + new float[] { 10.0f, 30.0f, 100.0f }, + LAST_30D_FIELD, + 15 + ); + indexDoc( + INDEX, + "doc_7", + DOC_FIELD, + "doc_7", + TOPIC_FIELD, + "biology", + TEXT_FIELD, + "dog", + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + ALL_TIME_FIELD, + 1000 + ); + refresh(INDEX); + } + + public void testRankDocsRetrieverBasicWithPagination() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 and with pagination, we'd just omit the first result + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + // include some pagination as well + source.from(1); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + 
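// doc_3 comes last: like doc_7 it matches only the knn sub-retriever, and its knn score is lower +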
assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverWithAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. all but doc_5 + final int rankWindowSize = 1; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(2L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithCollapse() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + 
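// sub-retriever doc sets, per the comments above: standard0 -> {1, 4, 6}, standard1 -> {2, 6}, knn -> {7, 2, 3, 6} +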
// the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + // with collapsing on topic field we would have 6, 2, 1, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.collapse( + new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getHits().length, equalTo(4)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).field(TOPIC_FIELD).getValue().toString(), equalTo("astronomy")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).field(TOPIC_FIELD).getValue().toString(), equalTo("technology")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getHits().length, equalTo(3)); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(1).getId(), equalTo("doc_3")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(3).field(TOPIC_FIELD).getValue().toString(), equalTo("biology")); + }); + } + + public void testRankDocsRetrieverWithCollapseAndAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. 
all but doc_5 + final int rankWindowSize = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1 and 6 as doc_4 is collapsed to doc_1 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + standard0.collapseBuilder = new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(3L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithNestedQuery() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(10L), ScoreMode.Avg) + .innerHit(new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10)); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + 
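// the prefilter below keeps only docs that also match "search" in the text field, dropping doc_3 +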
standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 3, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverMultipleCompoundRetrievers() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + CompoundRetrieverWithRankDocs compoundRetriever1 = new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ); + // simple standard retriever that would have the doc_4 as its first (and only) result + StandardRetrieverBuilder standard2 = new StandardRetrieverBuilder(); + standard2.queryBuilder = QueryBuilders.queryStringQuery("aardvark").defaultField(TEXT_FIELD); + + // combining the two retrievers would bring doc_4 at the top as it would be the only one present in both doc sets + // the rest of the docs 
would be sorted based on their ranks as they have the same score (1/2) + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList(new RetrieverSource(compoundRetriever1, null), new RetrieverSource(standard2, null)) + ) + ); + + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverDifferentNestedSorting() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, 6, 2 + standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(0), ScoreMode.Avg); + standard0.sortBuilders = List.of( + new FieldSortBuilder(LAST_30D_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.DESC) + ); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 4, 7 + standard1.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(ALL_TIME_FIELD).gt(0), ScoreMode.Avg); + standard1.sortBuilders = List.of( + new FieldSortBuilder(ALL_TIME_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.ASC) + ); + + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList(new RetrieverSource(standard0, null), new RetrieverSource(standard1, null)) + ) + ); + + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_2")); + }); + } + + class CompoundRetrieverWithRankDocs extends RetrieverBuilder { + + private final List sources; + private final int rankWindowSize; + + private CompoundRetrieverWithRankDocs(int rankWindowSize, List sources) { + this.rankWindowSize = rankWindowSize; + this.sources = Collections.unmodifiableList(sources); + } + + @Override + public boolean isCompound() { + return true; + } + + @Override + public QueryBuilder topDocsQuery() { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (ctx.getPointInTimeBuilder() == null) { + throw new IllegalStateException("PIT is required"); + } + + // Rewrite 
prefilters + boolean hasChanged = false; + var newPreFilters = rewritePreFilters(ctx); + hasChanged |= newPreFilters != preFilterQueryBuilders; + + // Rewrite retriever sources + List newRetrievers = new ArrayList<>(); + for (var entry : sources) { + RetrieverBuilder newRetriever = entry.retriever.rewrite(ctx); + if (newRetriever != entry.retriever) { + newRetrievers.add(new RetrieverSource(newRetriever, null)); + hasChanged |= newRetriever != entry.retriever; + } else if (newRetriever == entry.retriever) { + var sourceBuilder = entry.source != null + ? entry.source + : createSearchSourceBuilder(ctx.getPointInTimeBuilder(), newRetriever); + var rewrittenSource = sourceBuilder.rewrite(ctx); + newRetrievers.add(new RetrieverSource(newRetriever, rewrittenSource)); + hasChanged |= rewrittenSource != entry.source; + } + } + if (hasChanged) { + return new CompoundRetrieverWithRankDocs(rankWindowSize, newRetrievers); + } + + // execute searches + final SetOnce results = new SetOnce<>(); + final MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + for (var entry : sources) { + SearchRequest searchRequest = new SearchRequest().source(entry.source); + // The can match phase can reorder shards, so we disable it to ensure the stable ordering + searchRequest.setPreFilterShardSize(Integer.MAX_VALUE); + multiSearchRequest.add(searchRequest); + } + ctx.registerAsyncAction((client, listener) -> { + client.execute(TransportMultiSearchAction.TYPE, multiSearchRequest, new ActionListener<>() { + @Override + public void onResponse(MultiSearchResponse items) { + List topDocs = new ArrayList<>(); + for (int i = 0; i < items.getResponses().length; i++) { + var item = items.getResponses()[i]; + var rankDocs = getRankDocs(item.getResponse()); + sources.get(i).retriever().setRankDocs(rankDocs); + topDocs.add(rankDocs); + } + results.set(combineResults(topDocs)); + listener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + }); + + return new RankDocsRetrieverBuilder( + rankWindowSize, + newRetrievers.stream().map(s -> s.retriever).toList(), + results::get, + newPreFilters + ); + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public String getName() { + return "compound_retriever"; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + + } + + @Override + protected boolean doEquals(Object o) { + return false; + } + + @Override + protected int doHashCode() { + return 0; + } + + private RankDoc[] getRankDocs(SearchResponse searchResponse) { + assert searchResponse != null; + int size = Math.min(rankWindowSize, searchResponse.getHits().getHits().length); + RankDoc[] docs = new RankDoc[size]; + for (int i = 0; i < size; i++) { + var hit = searchResponse.getHits().getAt(i); + long sortValue = (long) hit.getRawSortValues()[hit.getRawSortValues().length - 1]; + int doc = decodeDoc(sortValue); + int shardRequestIndex = decodeShardRequestIndex(sortValue); + docs[i] = new RankDoc(doc, hit.getScore(), shardRequestIndex); + docs[i].rank = i + 1; + } + return docs; + } + + public static int decodeDoc(long value) { + return (int) value; + } + + public static int decodeShardRequestIndex(long value) { + return (int) (value >> 32); + } + + record RankDocAndHitRatio(RankDoc rankDoc, float hitRatio) {} + + /** + * Combines the provided {@code 
rankResults} to return the final top documents. + */ + public RankDoc[] combineResults(List<RankDoc[]> rankResults) { + int totalQueries = rankResults.size(); + final float step = 1.0f / totalQueries; + Map<RankDoc.RankKey, RankDocAndHitRatio> docsToRankResults = Maps.newMapWithExpectedSize(rankWindowSize); + for (var rankResult : rankResults) { + for (RankDoc scoreDoc : rankResult) { + docsToRankResults.compute(new RankDoc.RankKey(scoreDoc.doc, scoreDoc.shardIndex), (key, value) -> { + if (value == null) { + RankDoc res = new RankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex); + res.rank = scoreDoc.rank; + return new RankDocAndHitRatio(res, step); + } else { + RankDoc res = new RankDoc(scoreDoc.doc, Math.max(scoreDoc.score, value.rankDoc.score), scoreDoc.shardIndex); + res.rank = Math.min(scoreDoc.rank, value.rankDoc.rank); + return new RankDocAndHitRatio(res, value.hitRatio + step); + } + }); + } + } + // sort the results based on hit ratio, then score, then rank, with the smaller doc id as the final tiebreaker + RankDocAndHitRatio[] sortedResults = docsToRankResults.values().toArray(RankDocAndHitRatio[]::new); + Arrays.sort(sortedResults, (RankDocAndHitRatio doc1, RankDocAndHitRatio doc2) -> { + if (doc1.hitRatio != doc2.hitRatio) { + return doc1.hitRatio < doc2.hitRatio ? 1 : -1; + } + if (false == (Float.isNaN(doc1.rankDoc.score) || Float.isNaN(doc2.rankDoc.score)) + && (doc1.rankDoc.score != doc2.rankDoc.score)) { + return doc1.rankDoc.score < doc2.rankDoc.score ? 1 : -1; + } + if (doc1.rankDoc.rank != doc2.rankDoc.rank) { + return doc1.rankDoc.rank < doc2.rankDoc.rank ? -1 : 1; + } + return doc1.rankDoc.doc < doc2.rankDoc.doc ? -1 : 1; + }); + // trim the results if needed, otherwise each shard will always return `rank_window_size` results. + // pagination and all else will happen on the coordinator when combining the shard responses + RankDoc[] topResults = new RankDoc[Math.min(rankWindowSize, sortedResults.length)]; + for (int rank = 0; rank < topResults.length; ++rank) { + topResults[rank] = sortedResults[rank].rankDoc; + topResults[rank].rank = rank + 1; + topResults[rank].score = sortedResults[rank].hitRatio; + } + return topResults; + } + } + + private SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { + var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit).trackTotalHits(false).size(100); + retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, false); + + // Record the shard id in the sort result + List<SortBuilder<?>> sortBuilders = sourceBuilder.sorts() != null ?
new ArrayList<>(sourceBuilder.sorts()) : new ArrayList<>(); + if (sortBuilders.isEmpty()) { + sortBuilders.add(new ScoreSortBuilder()); + } + sortBuilders.add(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + sourceBuilder.sort(sortBuilders); + return sourceBuilder; + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java index 00013a8d396ba..e618a1b75cc4d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.plugins.Plugin; @@ -141,6 +142,11 @@ private AssertingRetrieverBuilder(RetrieverBuilder innerRetriever) { this.innerRetriever = innerRetriever; } + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { assertNull(ctx.getPointInTimeBuilder()); @@ -200,6 +206,11 @@ public boolean isCompound() { return true; } + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { assertNotNull(ctx.getPointInTimeBuilder()); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index d29009cd76b8d..c223db531e688 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -364,6 +364,7 @@ exports org.elasticsearch.search.rank.rerank; exports org.elasticsearch.search.rescore; exports org.elasticsearch.search.retriever; + exports org.elasticsearch.search.retriever.rankdoc; exports org.elasticsearch.search.runtime; exports org.elasticsearch.search.searchafter; exports org.elasticsearch.search.slice; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 33a16797e7e23..78f1b21ea7a44 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -196,6 +196,7 @@ static TransportVersion def(int id) { public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); + public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 36b3076c29a31..acdc3e32ea31a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -73,6 +73,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.lucene.grouping.TopFieldGroups; +import org.elasticsearch.search.retriever.rankdoc.RankDocsSortField; import org.elasticsearch.search.sort.ShardDocSortField; import java.io.IOException; @@ -551,6 +552,8 @@ private static SortField rewriteMergeSortField(SortField sortField) { return newSortField; } else if (sortField.getClass() == ShardDocSortField.class) { return new SortField(sortField.getField(), SortField.Type.LONG, sortField.getReverse()); + } else if (sortField.getClass() == RankDocsSortField.class) { + return new SortField(sortField.getField(), SortField.Type.INT, sortField.getReverse()); } else { return sortField; } diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java index 07eec973c77e0..304a48335fd11 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -32,6 +32,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery; import org.elasticsearch.search.runtime.AbstractScriptFieldQuery; import org.elasticsearch.search.vectors.KnnScoreDocQuery; @@ -255,10 +256,10 @@ public void visitLeaf(Query leafQuery) { hasUnknownLeaf[0] = true; } /** - * KnnScoreDocQuery requires the same reader that built the docs + * KnnScoreDocQuery and RankDocsQuery require the same reader that built the docs * When using {@link HighlightFlag#WEIGHT_MATCHES} different readers are used and isn't supported by this query */ - if (leafQuery instanceof KnnScoreDocQuery) { + if (leafQuery instanceof KnnScoreDocQuery || leafQuery instanceof RankDocsQuery) { hasUnknownLeaf[0] = true; } super.visitLeaf(query); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index bac5fe8c1d1ac..33c64f3eb6350 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -237,6 +237,8 @@ import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.StandardRetrieverBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.GeoDistanceSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -851,10 +853,12 @@ private void registerRescorer(RescorerSpec spec) { } private void registerRankers() { + namedWriteables.add(new
NamedWriteableRegistry.Entry(RankDoc.class, RankDoc.NAME, RankDoc::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(RankDoc.class, RankFeatureDoc.NAME, RankFeatureDoc::new)); namedWriteables.add( new NamedWriteableRegistry.Entry(RankShardResult.class, RankFeatureShardResult.NAME, RankFeatureShardResult::new) ); + namedWriteables.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RankDocsQueryBuilder.NAME, RankDocsQueryBuilder::new)); } private void registerSorts() { @@ -862,6 +866,7 @@ private void registerSorts() { namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, RankDocsSortBuilder.NAME, RankDocsSortBuilder::new)); } private static void registerFromPlugin(List plugins, Function> producer, Consumer consumer) { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 355267a43a8f4..909b6a7882a34 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -2228,15 +2228,15 @@ public ActionRequestValidationException validate( if (sorts() != null) { specified.add(SORT_FIELD.getPreferredName()); } - if (rescores() != null) { - specified.add(RESCORE_FIELD.getPreferredName()); - } if (minScore() != null) { specified.add(MIN_SCORE_FIELD.getPreferredName()); } if (rankBuilder() != null) { specified.add(RANK_FIELD.getPreferredName()); } + if (rescores() != null) { + specified.add(RESCORE_FIELD.getPreferredName()); + } if (specified.isEmpty() == false) { validationException = addValidationError( "cannot specify [" + RETRIEVER.getPreferredName() + "] and " + specified, diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java index 50b3ddc0f370a..02c03223738b5 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java @@ -8,29 +8,37 @@ package org.elasticsearch.search.rank; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; /** * {@code RankDoc} is the base class for all ranked results. - * Subclasses should extend this with additional information - * required for their global ranking method. + * Subclasses should extend this with additional information required for their global ranking method. 
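 + * <p>
 + * A minimal subclass sketch (illustrative only, not part of this change; all names here are hypothetical)
 + * just carries its extra state and overrides the {@code do*} hooks:
 + * <pre>
 + *     class WeightedRankDoc extends RankDoc {
 + *         final float weight;
 + *
 + *         WeightedRankDoc(int doc, float score, int shardIndex, float weight) {
 + *             super(doc, score, shardIndex);
 + *             this.weight = weight;
 + *         }
 + *
 + *         public String getWriteableName() {
 + *             return "weighted_rank_doc";
 + *         }
 + *
 + *         protected void doWriteTo(StreamOutput out) throws IOException {
 + *             out.writeFloat(weight);
 + *         }
 + *
 + *         protected boolean doEquals(RankDoc rd) {
 + *             return weight == ((WeightedRankDoc) rd).weight;
 + *         }
 + *
 + *         protected int doHashCode() {
 + *             return Float.hashCode(weight);
 + *         }
 + *     }
 + * </pre>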
*/ -public abstract class RankDoc extends ScoreDoc implements NamedWriteable { +public class RankDoc extends ScoreDoc implements NamedWriteable, ToXContentFragment { + + public static final String NAME = "rank_doc"; public static final int NO_RANK = -1; /** - * If this document has been ranked, this is its final - * rrf ranking from all the result sets. + * If this document has been ranked, this is its final RRF ranking from all the result sets. */ public int rank = NO_RANK; + @Override + public String getWriteableName() { + return NAME; + } + public record RankKey(int doc, int shardIndex) {} public RankDoc(int doc, float score, int shardIndex) { @@ -51,7 +59,26 @@ public final void writeTo(StreamOutput out) throws IOException { doWriteTo(out); } - protected abstract void doWriteTo(StreamOutput out) throws IOException; + protected void doWriteTo(StreamOutput out) throws IOException {} + + /** + * Explain the ranking of this document. + */ + public Explanation explain() { + return Explanation.match(rank, "doc [" + doc + "] with an original score of [" + score + "] is at rank [" + rank + "]."); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("_rank", rank); + builder.field("_doc", doc); + builder.field("_shard", shardIndex); + builder.field("_score", score); + doToXContent(builder, params); + return builder; + } + + protected void doToXContent(XContentBuilder builder, Params params) throws IOException {} @Override public final boolean equals(Object o) { @@ -61,17 +88,21 @@ public final boolean equals(Object o) { return doc == rd.doc && score == rd.score && shardIndex == rd.shardIndex && rank == rd.rank && doEquals(rd); } - protected abstract boolean doEquals(RankDoc rd); + protected boolean doEquals(RankDoc rd) { + return true; + } @Override public final int hashCode() { return Objects.hash(doc, score, shardIndex, doHashCode()); } - protected abstract int doHashCode(); + protected int doHashCode() { + return 0; + } @Override public String toString() { - return "RankDoc{" + "score=" + score + ", doc=" + doc + ", shardIndex=" + shardIndex + '}'; + return "RankDoc{" + "_rank=" + rank + ", _doc=" + doc + ", _shard=" + shardIndex + ", _score=" + score + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java index d8b4ec10410f1..8b0cc33844f82 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java +++ b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java @@ -8,9 +8,11 @@ package org.elasticsearch.search.rank.feature; +import org.apache.lucene.search.Explanation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -22,7 +24,7 @@ public class RankFeatureDoc extends RankDoc { public static final String NAME = "rank_feature_doc"; - // todo: update to support more than 1 fields; and not restrict to string data + // TODO: update to support more than one field, and not restrict to string data public String featureData; public RankFeatureDoc(int doc, float score, int shardIndex) { @@ -34,6 +36,11 @@ public RankFeatureDoc(StreamInput in) throws IOException { featureData = in.readOptionalString(); } + @Override + public
Explanation explain() { + throw new UnsupportedOperationException("explain is not supported for {" + getClass() + "}"); + } + public void featureData(String featureData) { this.featureData = featureData; } @@ -58,4 +65,9 @@ protected int doHashCode() { public String getWriteableName() { return NAME; } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("featureData", featureData); + } } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index b369324b3ee52..f2a2a606c0e01 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -10,7 +10,11 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; import org.elasticsearch.search.vectors.VectorData; @@ -120,6 +124,16 @@ public String getName() { return NAME; } + @Override + public QueryBuilder topDocsQuery() { + assert rankDocs != null : "{rankDocs} should have been materialized at this point"; + + BoolQueryBuilder knnTopResultsQuery = new BoolQueryBuilder().filter(new RankDocsQueryBuilder(rankDocs)) + .should(new ExactKnnQueryBuilder(VectorData.fromFloats(queryVector), field, similarity)); + preFilterQueryBuilders.forEach(knnTopResultsQuery::filter); + return knnTopResultsQuery; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { KnnSearchBuilder knnSearchBuilder = new KnnSearchBuilder( diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java new file mode 100644 index 0000000000000..d1f6a41dc789f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.DisMaxQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +/** + * A {@link RetrieverBuilder} that retrieves documents based on their previously computed ranks. + */ +public class RankDocsRetrieverBuilder extends RetrieverBuilder { + + public static final String NAME = "rank_docs_retriever"; + private final int rankWindowSize; + final List<RetrieverBuilder> sources; + final Supplier<RankDoc[]> rankDocs; + + public RankDocsRetrieverBuilder( + int rankWindowSize, + List<RetrieverBuilder> sources, + Supplier<RankDoc[]> rankDocs, + List<QueryBuilder> preFilterQueryBuilders + ) { + this.rankWindowSize = rankWindowSize; + this.rankDocs = rankDocs; + this.sources = sources; + this.preFilterQueryBuilders = preFilterQueryBuilders; + } + + @Override + public String getName() { + return NAME; + } + + private boolean sourceShouldRewrite(QueryRewriteContext ctx) throws IOException { + for (var source : sources) { + if (source.isCompound()) { + return true; + } + var newSource = source.rewrite(ctx); + if (newSource != source) { + return true; + } + } + return false; + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + assert false == sourceShouldRewrite(ctx) : "retriever sources should be rewritten first"; + var rewrittenFilters = rewritePreFilters(ctx); + if (rewrittenFilters != preFilterQueryBuilders) { + return new RankDocsRetrieverBuilder(rankWindowSize, sources, rankDocs, rewrittenFilters); + } + return this; + } + + @Override + public QueryBuilder topDocsQuery() { + // this is used to fetch all documents from the parent retrievers (i.e. sources) + // so that we can use all the matched documents to compute aggregations, nested hits, etc. + DisMaxQueryBuilder disMax = new DisMaxQueryBuilder().tieBreaker(0f); + for (var retriever : sources) { + var query = retriever.topDocsQuery(); + if (query != null) { + if (retriever.retrieverName() != null) { + query.queryName(retriever.retrieverName()); + } + disMax.add(query); + } + } + // ignore prefilters of this level, they are already propagated to children + return disMax; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + // here we force a custom sort based on the rank of the documents + // TODO: should we adjust to account for other field sort options just for the top ranked docs?
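+ // Net effect, as a sketch (assuming no rescorers, no aggregations and no prefilters):
+ //   query: bool { must: rank_docs_query }   <- only the precomputed top documents can match
+ //   sort:  [rank_docs_sort, _score]         <- hits are returned in their precomputed rank order
+ // With aggregations, the query instead becomes should(rank_docs_query, disjunction of the sources)
+ // combined with post_filter(rank_docs_query), so aggs see every source match while the top hits stay stable.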
+ if (searchSourceBuilder.rescores() == null || searchSourceBuilder.rescores().isEmpty()) { + searchSourceBuilder.sort(Arrays.asList(new RankDocsSortBuilder(rankDocs.get()), new ScoreSortBuilder())); + } + if (searchSourceBuilder.explain() != null && searchSourceBuilder.explain()) { + searchSourceBuilder.trackScores(true); + } + BoolQueryBuilder boolQuery = new BoolQueryBuilder(); + RankDocsQueryBuilder rankQuery = new RankDocsQueryBuilder(rankDocs.get()); + // if we have aggregations we need to compute them based on all doc matches, not just the top hits + // so we just post-filter the top hits based on the rank queries we have + if (searchSourceBuilder.aggregations() != null) { + boolQuery.should(rankQuery); + // compute a disjunction of all the query sources that were executed to compute the top rank docs + QueryBuilder disjunctionOfSources = topDocsQuery(); + if (disjunctionOfSources != null) { + boolQuery.should(disjunctionOfSources); + } + // post filter the results so that the top docs are still the same + searchSourceBuilder.postFilter(rankQuery); + } else { + boolQuery.must(rankQuery); + } + // add any prefilters present in the retriever + for (var preFilterQueryBuilder : preFilterQueryBuilders) { + boolQuery.filter(preFilterQueryBuilder); + } + searchSourceBuilder.query(boolQuery); + } + + @Override + protected boolean doEquals(Object o) { + RankDocsRetrieverBuilder other = (RankDocsRetrieverBuilder) o; + return Arrays.equals(rankDocs.get(), other.rankDocs.get()) + && sources.equals(other.sources) + && rankWindowSize == other.rankWindowSize; + } + + @Override + protected int doHashCode() { + return Objects.hash(super.hashCode(), Arrays.hashCode(rankDocs.get()), sources, rankWindowSize); + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException("toXContent() is not supported for " + this.getClass()); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 3a9979030683a..0857ef21adaaf 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.FilterXContentParserWrapper; import org.elasticsearch.xcontent.NamedObjectNotFoundException; @@ -191,6 +192,34 @@ public boolean isCompound() { return false; } + protected RankDoc[] rankDocs = null; + + public RetrieverBuilder() {} + + protected final List rewritePreFilters(QueryRewriteContext ctx) throws IOException { + List newFilters = new ArrayList<>(preFilterQueryBuilders.size()); + boolean changed = false; + for (var filter : preFilterQueryBuilders) { + var newFilter = filter.rewrite(ctx); + changed |= filter != newFilter; + newFilters.add(newFilter); + } + if (changed) { + return newFilters; + } + return preFilterQueryBuilders; + } + + /** + * This function is called by compound {@link RetrieverBuilder} to return the original query that + * was used by this retriever to compute its top documents. 
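+ * Implementations that have no such query to offer may return {@code null}; callers like
+ * {@code RankDocsRetrieverBuilder#topDocsQuery()} skip {@code null} entries when building their disjunction.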
+ */ + public abstract QueryBuilder topDocsQuery(); + + public void setRankDocs(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + } + /** * Gets the filters for this retriever. */ @@ -254,5 +283,9 @@ public String toString() { return Strings.toString(this, true, true); } + public String retrieverName() { + return retrieverName; + } + // ---- END FOR TESTING ---- } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java index 4694780770617..682d456295ba9 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java @@ -106,6 +106,17 @@ public static StandardRetrieverBuilder fromXContent(XContentParser parser, Retri Float minScore; CollapseBuilder collapseBuilder; + @Override + public QueryBuilder topDocsQuery() { + // TODO: for compound retrievers this will have to be reworked as queries like knn could be executed twice + if (preFilterQueryBuilders.isEmpty()) { + return queryBuilder; + } + var ret = new BoolQueryBuilder().filter(queryBuilder); + preFilterQueryBuilders.stream().forEach(ret::filter); + return ret; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { if (preFilterQueryBuilders.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java new file mode 100644 index 0000000000000..77da1cc80bc97 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.rank.RankDoc; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * A {@code RankDocsQuery} returns the top k documents in the order specified by the global doc IDs. + * This is used by retrievers that compute a score for a large document set, and need access to just the top results, + * after performing any reranking or filtering. + */ +public class RankDocsQuery extends Query { + + private final RankDoc[] docs; + private final int[] segmentStarts; + private final Object contextIdentity; + + /** + * Creates a {@code RankDocsQuery} based on the provided docs. 
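+ * <p>
+ * A worked example (illustrative only): with two 10-doc segments and matching global docs
+ * {3, 12, 17}, {@code docs} holds those three entries and {@code segmentStarts} is {0, 1, 3},
+ * i.e. segment 0 owns docs[0, 1) and segment 1 owns docs[1, 3).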
+ * + * @param docs the global doc IDs of documents that match, in ascending order + * @param segmentStarts the indexes in docs and scores corresponding to the first matching + * document in each segment. If a segment has no matching documents, it should be assigned + * the index of the next segment that does. There should be a final entry that is always + * docs.length-1. + * @param contextIdentity an object identifying the reader context that was used to build this + * query + */ + RankDocsQuery(RankDoc[] docs, int[] segmentStarts, Object contextIdentity) { + this.docs = docs; + this.segmentStarts = segmentStarts; + this.contextIdentity = contextIdentity; + } + + @Override + public Query rewrite(IndexSearcher searcher) throws IOException { + if (docs.length == 0) { + return new MatchNoDocsQuery(); + } + return this; + } + + RankDoc[] rankDocs() { + return docs; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + if (searcher.getIndexReader().getContext().id() != contextIdentity) { + throw new IllegalStateException("This RankDocsDocQuery was created by a different reader"); + } + return new Weight(this) { + + @Override + public int count(LeafReaderContext context) { + return segmentStarts[context.ord + 1] - segmentStarts[context.ord]; + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) { + int found = Arrays.binarySearch(docs, doc + context.docBase, (a, b) -> Integer.compare(((RankDoc) a).doc, (int) b)); + if (found < 0) { + return Explanation.noMatch("doc not found in top " + docs.length + " rank docs"); + } + return docs[found].explain(); + } + + @Override + public Scorer scorer(LeafReaderContext context) { + // Segment starts indicate how many docs are in the segment, + // upper equalling lower indicates no documents for this segment + if (segmentStarts[context.ord] == segmentStarts[context.ord + 1]) { + return null; + } + return new Scorer(this) { + final int lower = segmentStarts[context.ord]; + final int upper = segmentStarts[context.ord + 1]; + int upTo = -1; + float score; + + @Override + public DocIdSetIterator iterator() { + return new DocIdSetIterator() { + @Override + public int docID() { + return currentDocId(); + } + + @Override + public int nextDoc() { + if (upTo == -1) { + upTo = lower; + } else { + ++upTo; + } + return currentDocId(); + } + + @Override + public int advance(int target) throws IOException { + return slowAdvance(target); + } + + @Override + public long cost() { + return upper - lower; + } + }; + } + + @Override + public float getMaxScore(int docId) { + if (docId != NO_MORE_DOCS) { + docId += context.docBase; + } + float maxScore = 0; + for (int idx = Math.max(lower, upTo); idx < upper && docs[idx].doc <= docId; idx++) { + maxScore = Math.max(maxScore, docs[idx].score); + } + return maxScore; + } + + @Override + public float score() { + return docs[upTo].score; + } + + @Override + public int docID() { + return currentDocId(); + } + + private int currentDocId() { + if (upTo == -1) { + return -1; + } + if (upTo >= upper) { + return NO_MORE_DOCS; + } + return docs[upTo].doc - context.docBase; + } + + }; + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return true; + } + }; + } + + @Override + public String toString(String field) { + return this.getClass().getSimpleName() + "{rank_docs:" + Arrays.toString(docs) + "}"; + } + + @Override + public void visit(QueryVisitor visitor) { + visitor.visitLeaf(this); + } + + @Override + public 
boolean equals(Object obj) { + if (sameClassAs(obj) == false) { + return false; + } + return Arrays.equals(docs, ((RankDocsQuery) obj).docs) + && Arrays.equals(segmentStarts, ((RankDocsQuery) obj).segmentStarts) + && contextIdentity == ((RankDocsQuery) obj).contextIdentity; + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), Arrays.hashCode(docs), Arrays.hashCode(segmentStarts), contextIdentity); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java new file mode 100644 index 0000000000000..ff2085bc8903f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; + +public class RankDocsQueryBuilder extends AbstractQueryBuilder { + + public static final String NAME = "rank_docs_query"; + + private final RankDoc[] rankDocs; + + public RankDocsQueryBuilder(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + } + + public RankDocsQueryBuilder(StreamInput in) throws IOException { + super(in); + this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new); + } + + RankDoc[] rankDocs() { + return rankDocs; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeArray(StreamOutput::writeNamedWriteable, rankDocs); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + RankDoc[] shardRankDocs = Arrays.stream(rankDocs) + .filter(r -> r.shardIndex == context.getShardRequestIndex()) + .sorted(Comparator.comparingInt(r -> r.doc)) + .toArray(RankDoc[]::new); + IndexReader reader = context.getIndexReader(); + int[] segmentStarts = findSegmentStarts(reader, shardRankDocs); + return new RankDocsQuery(shardRankDocs, segmentStarts, reader.getContext().id()); + } + + private static int[] findSegmentStarts(IndexReader reader, RankDoc[] docs) { + int[] starts = new int[reader.leaves().size() + 1]; + starts[starts.length - 1] = docs.length; + if (starts.length == 2) { + return starts; + } + int resultIndex = 0; + for (int i = 1; i < starts.length - 1; i++) { + int upper = reader.leaves().get(i).docBase; + resultIndex = Arrays.binarySearch(docs, resultIndex, docs.length, upper, (a, b) -> Integer.compare(((RankDoc) a).doc, (int) b)); + if (resultIndex < 0) { + resultIndex = -1 - resultIndex; + } + 
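+            // binarySearch returns (-insertionPoint - 1) on a miss; the flip above recovers the
+            // insertion point, i.e. the index of the first rank doc at or after this segment's docBase.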
starts[i] = resultIndex; + } + return starts; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.startArray("rank_docs"); + for (RankDoc doc : rankDocs) { + builder.startObject(); + doc.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + + @Override + protected boolean doEquals(RankDocsQueryBuilder other) { + return Arrays.equals(rankDocs, other.rankDocs); + } + + @Override + protected int doHashCode() { + return Arrays.hashCode(rankDocs); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANK_DOCS_RETRIEVER; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java new file mode 100644 index 0000000000000..0122e6ee9ea12 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortFieldAndFormat; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Builds a {@code RankDocsSortField} that sorts documents by their rank as computed through the {@code RankDocsRetrieverBuilder}. 
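+ * <p>
+ * This sort is built internally rather than parsed from a request:
+ * {@code RankDocsRetrieverBuilder#extractToSearchSourceBuilder} installs it as the primary sort,
+ * followed by a {@code ScoreSortBuilder} as tiebreaker.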
+ */ +public class RankDocsSortBuilder extends SortBuilder { + public static final String NAME = "rank_docs_sort"; + + private RankDoc[] rankDocs; + + public RankDocsSortBuilder(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + } + + public RankDocsSortBuilder(StreamInput in) throws IOException { + this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new); + } + + public RankDocsSortBuilder(RankDocsSortBuilder original) { + this.rankDocs = original.rankDocs; + } + + public RankDocsSortBuilder rankDocs(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + return this; + } + + public RankDoc[] rankDocs() { + return this.rankDocs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray(StreamOutput::writeNamedWriteable, rankDocs); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + return this; + } + + @Override + protected SortFieldAndFormat build(SearchExecutionContext context) throws IOException { + RankDoc[] shardRankDocs = Arrays.stream(rankDocs) + .filter(r -> r.shardIndex == context.getShardRequestIndex()) + .toArray(RankDoc[]::new); + return new SortFieldAndFormat(new RankDocsSortField(shardRankDocs), DocValueFormat.RAW); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANK_DOCS_RETRIEVER; + } + + @Override + public BucketedSort buildBucketedSort(SearchExecutionContext context, BigArrays bigArrays, int bucketSize, BucketedSort.ExtraData extra) + throws IOException { + throw new UnsupportedOperationException("buildBucketedSort() is not supported for " + this.getClass()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException("toXContent() is not supported for " + this.getClass()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + RankDocsSortBuilder that = (RankDocsSortBuilder) obj; + return Arrays.equals(rankDocs, that.rankDocs) && this.order.equals(that.order); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(this.rankDocs), this.order); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java new file mode 100644 index 0000000000000..3cd29d352028b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.comparators.NumericComparator; +import org.apache.lucene.util.hnsw.IntToIntFunction; +import org.elasticsearch.search.rank.RankDoc; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * A {@link SortField} that sorts documents by their rank as computed through the {@code RankDocsRetrieverBuilder}. + * This is used when we want to score and rank the documents irrespective of their original scores, + * but based on the provided rank they were assigned, e.g. through an RRF retriever. + **/ +public class RankDocsSortField extends SortField { + + public static final String NAME = "_rank"; + + public RankDocsSortField(RankDoc[] rankDocs) { + super(NAME, new FieldComparatorSource() { + @Override + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { + return new RankDocsComparator(numHits, rankDocs); + } + }); + } + + private static class RankDocsComparator extends NumericComparator { + private final int[] values; + private final Map rankDocMap; + private int topValue; + private int bottom; + + private RankDocsComparator(int numHits, RankDoc[] rankDocs) { + super(NAME, Integer.MAX_VALUE, false, Pruning.NONE, Integer.BYTES); + this.values = new int[numHits]; + this.rankDocMap = Arrays.stream(rankDocs).collect(Collectors.toMap(k -> k.doc, v -> v.rank)); + } + + @Override + public int compare(int slot1, int slot2) { + return Integer.compare(values[slot1], values[slot2]); + } + + @Override + public Integer value(int slot) { + return Integer.valueOf(values[slot]); + } + + @Override + public void setTopValue(Integer value) { + topValue = value; + } + + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + IntToIntFunction docToRank = doc -> rankDocMap.getOrDefault(context.docBase + doc, Integer.MAX_VALUE); + return new LeafFieldComparator() { + @Override + public void setBottom(int slot) throws IOException { + bottom = values[slot]; + } + + @Override + public int compareBottom(int doc) { + return Integer.compare(bottom, docToRank.apply(doc)); + } + + @Override + public int compareTop(int doc) { + return Integer.compare(topValue, docToRank.apply(doc)); + } + + @Override + public void copy(int slot, int doc) { + values[slot] = docToRank.apply(doc); + } + + @Override + public void setScorer(Scorable scorer) {} + }; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index ed02328d388b6..585e7c775da35 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; -import org.elasticsearch.search.rank.TestRankDoc; import 
org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; import org.elasticsearch.search.suggest.SortBy; @@ -463,10 +462,10 @@ private static AtomicArray generateQueryResults( topDocs = Lucene.EMPTY_TOP_DOCS; } else if (rank) { int nDocs = randomIntBetween(0, searchHitsSize); - TestRankDoc[] rankDocs = new TestRankDoc[nDocs]; + RankDoc[] rankDocs = new RankDoc[nDocs]; for (int i = 0; i < nDocs; i++) { float score = useConstantScore ? 1.0F : Math.abs(randomFloat()); - rankDocs[i] = new TestRankDoc(i, score, shardIndex); + rankDocs[i] = new RankDoc(i, score, shardIndex); maxScore = Math.max(score, maxScore); } querySearchResult.setRankShardResult(new TestRankShardResult(rankDocs)); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 84a4eab897ba8..2b1b95d8595f0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.AbstractSearchTestCase; @@ -289,6 +290,11 @@ protected int doHashCode() { public boolean isCompound() { return true; } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } })); searchRequest.allowPartialSearchResults(true); searchRequest.scroll((Scroll) null); @@ -303,6 +309,11 @@ public boolean isCompound() { { // allow_partial_results and non-compound retriever SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder().retriever(new RetrieverBuilder() { + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { // no-op @@ -361,6 +372,11 @@ protected int doHashCode() { public boolean isCompound() { return true; } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } })); searchRequest.scroll((Scroll) null); ActionRequestValidationException validationErrors = searchRequest.validate(); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 7ddcc88facb2a..bdddea58b713f 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -111,9 +111,9 @@ import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankDoc; import org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; @@ -504,9 +504,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, 
so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -553,7 +553,7 @@ public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) queryResult = (QuerySearchResult) queryPhaseResults.get(); // these are the matched docs from the query phase - final TestRankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; + final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon List topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); @@ -709,18 +709,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -741,9 +741,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -868,9 +868,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -969,18 +969,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = 
rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -1001,9 +1001,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -1097,18 +1097,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -1129,9 +1129,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java index b1db177f6ccdd..cb0b69e5ef2ac 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -27,9 +27,9 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankDoc; import org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.suggest.SuggestTests; import org.elasticsearch.test.ESTestCase; @@ -80,9 +80,9 @@ private static QuerySearchResult createTestInstance() throws Exception { result.from(randomInt()); if (randomBoolean()) { int queryCount = randomIntBetween(2, 4); - TestRankDoc[] docs = new TestRankDoc[randomIntBetween(5, 20)]; + RankDoc[] docs = new RankDoc[randomIntBetween(5, 20)]; for (int di = 0; di < docs.length; ++di) { - docs[di] = new TestRankDoc(di, -1, queryCount); + docs[di] = new RankDoc(di, -1, queryCount); } 
result.setRankShardResult(new TestRankShardResult(docs)); } diff --git a/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java b/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java new file mode 100644 index 0000000000000..5e7ac2957c250 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.rank; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class RankDocTests extends AbstractWireSerializingTestCase { + + static RankDoc createTestRankDoc() { + RankDoc rankDoc = new RankDoc(randomNonNegativeInt(), randomFloat(), randomIntBetween(0, 1)); + rankDoc.rank = randomNonNegativeInt(); + return rankDoc; + } + + @Override + protected Writeable.Reader instanceReader() { + return RankDoc::new; + } + + @Override + protected RankDoc createTestInstance() { + return createTestRankDoc(); + } + + @Override + protected RankDoc mutateInstance(RankDoc instance) throws IOException { + RankDoc mutated = new RankDoc(instance.doc, instance.score, instance.shardIndex); + mutated.rank = instance.rank; + if (frequently()) { + mutated.doc = randomNonNegativeInt(); + } + if (frequently()) { + mutated.score = randomFloat(); + } + if (frequently()) { + mutated.shardIndex = randomNonNegativeInt(); + } + if (frequently()) { + mutated.rank = randomNonNegativeInt(); + } + return mutated; + } + + public void testExplain() { + RankDoc instance = createTestRankDoc(); + assertEquals(instance.explain().toString(), instance.explain().toString()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index de35d765a1551..afa6ff89a79e9 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -18,6 +19,9 @@ import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -102,6 +106,34 @@ public void testRewrite() throws IOException { } } + public void testIsCompound() { + KnnRetrieverBuilder knnRetriever = createRandomKnnRetrieverBuilder(); + assertFalse(knnRetriever.isCompound()); 
+ } + + public void testTopDocsQuery() { + KnnRetrieverBuilder knnRetriever = createRandomKnnRetrieverBuilder(); + knnRetriever.rankDocs = new RankDoc[] { + new RankDoc(0, randomFloat(), 0), + new RankDoc(10, randomFloat(), 0), + new RankDoc(20, randomFloat(), 1), + new RankDoc(25, randomFloat(), 1), }; + final int preFilters = knnRetriever.preFilterQueryBuilders.size(); + QueryBuilder topDocsQuery = knnRetriever.topDocsQuery(); + assertNotNull(topDocsQuery); + assertThat(topDocsQuery, instanceOf(BoolQueryBuilder.class)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().size(), equalTo(1 + preFilters)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().get(0), instanceOf(RankDocsQueryBuilder.class)); + for (int i = 0; i < preFilters; i++) { + assertThat( + ((BoolQueryBuilder) topDocsQuery).filter().get(i + 1), + instanceOf(knnRetriever.preFilterQueryBuilders.get(i).getClass()) + ); + } + assertThat(((BoolQueryBuilder) topDocsQuery).should().size(), equalTo(1)); + assertThat(((BoolQueryBuilder) topDocsQuery).should().get(0), instanceOf(ExactKnnQueryBuilder.class)); + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java new file mode 100644 index 0000000000000..59f5ddf0d87ca --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.DisMaxQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class RankDocsRetrieverBuilderTests extends ESTestCase { + + private Supplier rankDocsSupplier() { + final int rankDocsCount = randomIntBetween(0, 10); + final int shardIndex = 0; + RankDoc[] rankDocs = new RankDoc[rankDocsCount]; + int docId = 0; + for (int i = 0; i < rankDocsCount; i++) { + RankDoc testRankDoc = new RankDoc(docId, randomFloat(), shardIndex); + docId += randomInt(100); + rankDocs[i] = testRankDoc; + } + return () -> rankDocs; + } + + private List innerRetrievers() { + List retrievers = new ArrayList<>(); + int numRetrievers = randomIntBetween(1, 10); + for (int i = 0; i < numRetrievers; i++) { + if (randomBoolean()) { + StandardRetrieverBuilder standardRetrieverBuilder = new StandardRetrieverBuilder(); + standardRetrieverBuilder.queryBuilder = RandomQueryBuilder.createQuery(random()); + if (randomBoolean()) { + standardRetrieverBuilder.preFilterQueryBuilders = preFilters(); + } + retrievers.add(standardRetrieverBuilder); + } else { + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + randomAlphaOfLength(10), + randomVector(randomInt(10)), + null, + randomInt(10), + randomIntBetween(10, 100), + randomFloat() + ); + if (randomBoolean()) { + knnRetrieverBuilder.preFilterQueryBuilders = preFilters(); + } + knnRetrieverBuilder.rankDocs = rankDocsSupplier().get(); + retrievers.add(knnRetrieverBuilder); + } + } + return retrievers; + } + + private List preFilters() { + List preFilters = new ArrayList<>(); + int numPreFilters = randomInt(10); + for (int i = 0; i < numPreFilters; i++) { + preFilters.add(RandomQueryBuilder.createQuery(random())); + } + return preFilters; + } + + private RankDocsRetrieverBuilder createRandomRankDocsRetrieverBuilder() { + return new RankDocsRetrieverBuilder(randomInt(100), innerRetrievers(), rankDocsSupplier(), preFilters()); + } + + public void testExtractToSearchSourceBuilder() { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + SearchSourceBuilder source = new SearchSourceBuilder(); + if (randomBoolean()) { + source.aggregation(new TermsAggregationBuilder("name").field("field")); 
+ } + retriever.extractToSearchSourceBuilder(source, randomBoolean()); + assertThat(source.sorts().size(), equalTo(2)); + assertThat(source.sorts().get(0), instanceOf(RankDocsSortBuilder.class)); + assertThat(source.sorts().get(1), instanceOf(ScoreSortBuilder.class)); + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + if (source.aggregations() != null) { + assertThat(bq.must().size(), equalTo(0)); + assertThat(bq.should().size(), greaterThanOrEqualTo(1)); + assertThat(bq.should().get(0), instanceOf(RankDocsQueryBuilder.class)); + assertNotNull(source.postFilter()); + assertThat(source.postFilter(), instanceOf(RankDocsQueryBuilder.class)); + } else { + assertThat(bq.must().size(), equalTo(1)); + assertThat(bq.must().get(0), instanceOf(RankDocsQueryBuilder.class)); + assertNull(source.postFilter()); + } + assertThat(bq.filter().size(), equalTo(retriever.preFilterQueryBuilders.size())); + } + + public void testTopDocsQuery() { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + QueryBuilder topDocs = retriever.topDocsQuery(); + assertNotNull(topDocs); + assertThat(topDocs, instanceOf(DisMaxQueryBuilder.class)); + assertThat(((DisMaxQueryBuilder) topDocs).innerQueries(), hasSize(retriever.sources.size())); + } + + public void testRewrite() throws IOException { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + boolean compoundAdded = false; + if (randomBoolean()) { + compoundAdded = true; + retriever.sources.add(new TestRetrieverBuilder("compound_retriever") { + @Override + public boolean isCompound() { + return true; + } + }); + } + SearchSourceBuilder source = new SearchSourceBuilder().retriever(retriever); + QueryRewriteContext queryRewriteContext = mock(QueryRewriteContext.class); + if (compoundAdded) { + expectThrows(AssertionError.class, () -> Rewriteable.rewrite(source, queryRewriteContext)); + } else { + SearchSourceBuilder rewrittenSource = Rewriteable.rewrite(source, queryRewriteContext); + assertNull(rewrittenSource.retriever()); + assertTrue(rewrittenSource.knnSearch().isEmpty()); + assertThat( + rewrittenSource.query(), + anyOf(instanceOf(BoolQueryBuilder.class), instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class)) + ); + if (rewrittenSource.query() instanceof BoolQueryBuilder) { + BoolQueryBuilder bq = (BoolQueryBuilder) rewrittenSource.query(); + assertThat(bq.filter().size(), equalTo(retriever.preFilterQueryBuilders.size())); + // we don't have any aggregations so the RankDocs query is set as a must clause + assertThat(bq.must().size(), equalTo(1)); + assertThat(bq.must().get(0), instanceOf(RankDocsQueryBuilder.class)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java index cd0d8f8d50c1e..166b07e23446c 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java @@ -166,6 +166,37 @@ public void testRewrite() throws IOException { } } + public void testIsCompound() { + StandardRetrieverBuilder standardRetriever = createTestInstance(); + assertFalse(standardRetriever.isCompound()); + } + + public void testTopDocsQuery() throws IOException { + StandardRetrieverBuilder standardRetriever = 
createTestInstance(); + final int preFilters = standardRetriever.preFilterQueryBuilders.size(); + if (standardRetriever.queryBuilder == null) { + if (preFilters > 0) { + expectThrows(IllegalArgumentException.class, standardRetriever::topDocsQuery); + } + } else { + QueryBuilder topDocsQuery = standardRetriever.topDocsQuery(); + assertNotNull(topDocsQuery); + if (preFilters > 0) { + assertThat(topDocsQuery, instanceOf(BoolQueryBuilder.class)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().size(), equalTo(1 + preFilters)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().get(0), instanceOf(standardRetriever.queryBuilder.getClass())); + for (int i = 0; i < preFilters; i++) { + assertThat( + ((BoolQueryBuilder) topDocsQuery).filter().get(i + 1), + instanceOf(standardRetriever.preFilterQueryBuilders.get(i).getClass()) + ); + } + } else { + assertThat(topDocsQuery, instanceOf(standardRetriever.queryBuilder.getClass())); + } + } + } + private static void assertEqualQueryOrMatchAllNone(QueryBuilder actual, QueryBuilder expected) { assertThat(actual, anyOf(instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class), equalTo(expected))); } diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java new file mode 100644 index 0000000000000..5ff0ec2017e96 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Arrays; + +public class RankDocsQueryBuilderTests extends AbstractQueryTestCase { + + private RankDoc[] generateRandomRankDocs() { + int totalDocs = randomIntBetween(0, 10); + RankDoc[] rankDocs = new RankDoc[totalDocs]; + int currentDoc = 0; + for (int i = 0; i < totalDocs; i++) { + RankDoc rankDoc = new RankDoc(currentDoc, randomFloat(), randomIntBetween(0, 2)); + rankDocs[i] = rankDoc; + currentDoc += randomIntBetween(0, 100); + } + return rankDocs; + } + + @Override + protected RankDocsQueryBuilder doCreateTestQueryBuilder() { + RankDoc[] rankDocs = generateRandomRankDocs(); + return new RankDocsQueryBuilder(rankDocs); + } + + @Override + protected void doAssertLuceneQuery(RankDocsQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { + assertTrue(query instanceof RankDocsQuery); + RankDocsQuery rankDocsQuery = (RankDocsQuery) query; + assertArrayEquals(queryBuilder.rankDocs(), rankDocsQuery.rankDocs()); + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testToQuery() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = iw.getReader()) { + SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader)); + RankDocsQueryBuilder queryBuilder = createTestQueryBuilder(); + Query query = queryBuilder.doToQuery(context); + + assertTrue(query instanceof RankDocsQuery); + RankDocsQuery rankDocsQuery = (RankDocsQuery) query; + + int shardIndex = context.getShardRequestIndex(); + int expectedDocs = (int) Arrays.stream(queryBuilder.rankDocs()).filter(x -> x.shardIndex == shardIndex).count(); + assertEquals(expectedDocs, rankDocsQuery.rankDocs().length); + } + } + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testCacheability() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = iw.getReader()) { + SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader)); + RankDocsQueryBuilder queryBuilder = createTestQueryBuilder(); + QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new SearchExecutionContext(context)); + assertNotNull(rewriteQuery.toQuery(context)); + assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable()); + } + } + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testMustRewrite() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = 
iw.getReader()) {
+                SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader));
+                context.setAllowUnmappedFields(true);
+                RankDocsQueryBuilder queryBuilder = createTestQueryBuilder();
+                queryBuilder.toQuery(context);
+            }
+        }
+    }
+
+    @Override
+    public void testFromXContent() throws IOException {
+        // no-op since RankDocsQueryBuilder is an internal only API
+    }
+
+    @Override
+    public void testUnknownField() throws IOException {
+        // no-op since RankDocsQueryBuilder is agnostic to unknown fields and an internal only API
+    }
+
+    @Override
+    public void testValidOutput() throws IOException {
+        // no-op since RankDocsQueryBuilder is an internal only API
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java
new file mode 100644
index 0000000000000..ba7728c5dc622
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.retriever.rankdoc;
+
+import org.apache.lucene.search.SortField;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.rank.RankDoc;
+import org.elasticsearch.search.sort.AbstractSortTestCase;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class RankDocsSortBuilderTests extends AbstractSortTestCase<RankDocsSortBuilder> {
+
+    @Override
+    protected RankDocsSortBuilder createTestItem() {
+        return randomRankDocsSortBuilder();
+    }
+
+    private RankDocsSortBuilder randomRankDocsSortBuilder() {
+        RankDoc[] rankDocs = randomRankDocs(randomInt(100));
+        return new RankDocsSortBuilder(rankDocs);
+    }
+
+    private RankDoc[] randomRankDocs(int totalDocs) {
+        RankDoc[] rankDocs = new RankDoc[totalDocs];
+        for (int i = 0; i < totalDocs; i++) {
+            rankDocs[i] = new RankDoc(randomNonNegativeInt(), randomFloat(), randomIntBetween(0, 1));
+            rankDocs[i].rank = i + 1;
+        }
+        return rankDocs;
+    }
+
+    @Override
+    protected RankDocsSortBuilder mutate(RankDocsSortBuilder original) throws IOException {
+        RankDocsSortBuilder mutated = new RankDocsSortBuilder(original);
+        mutated.rankDocs(randomRankDocs(original.rankDocs().length + randomInt(100)));
+        return mutated;
+    }
+
+    @Override
+    public void testFromXContent() throws IOException {
+        // no-op
+    }
+
+    @Override
+    protected RankDocsSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException {
+        throw new UnsupportedOperationException(
+            "{" + RankDocsSortBuilder.class.getSimpleName() + "} does not support parsing from XContent"
+        );
+    }
+
+    @Override
+    protected void sortFieldAssertions(RankDocsSortBuilder builder, SortField sortField, DocValueFormat format) throws IOException {
+        assertThat(builder.order(), equalTo(SortOrder.ASC));
+        assertThat(sortField, instanceOf(RankDocsSortField.class));
+        assertThat(sortField.getField(), equalTo(RankDocsSortField.NAME));
+        assertThat(sortField.getType(),
equalTo(SortField.Type.CUSTOM)); + assertThat(sortField.getReverse(), equalTo(false)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java deleted file mode 100644 index f2f3cb82d203f..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.rank; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -public class TestRankDoc extends RankDoc { - - public TestRankDoc(int doc, float score, int shardIndex) { - super(doc, score, shardIndex); - } - - public TestRankDoc(StreamInput in) throws IOException { - super(in); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - // do nothing - } - - @Override - public boolean doEquals(RankDoc rd) { - return true; - } - - @Override - public int doHashCode() { - return 0; - } - - @Override - public String getWriteableName() { - return "test_rank_doc"; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java index ab66d021497d5..6c1faaf8d2abf 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java +++ b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java @@ -17,14 +17,14 @@ public class TestRankShardResult implements RankShardResult { - public final TestRankDoc[] testRankDocs; + public final RankDoc[] testRankDocs; - public TestRankShardResult(TestRankDoc[] testRankDocs) { + public TestRankShardResult(RankDoc[] testRankDocs) { this.testRankDocs = testRankDocs; } public TestRankShardResult(StreamInput in) throws IOException { - testRankDocs = in.readArray(TestRankDoc::new, TestRankDoc[]::new); + testRankDocs = in.readArray(RankDoc::new, RankDoc[]::new); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java index 40cc1890f69ed..fcc01adf1815a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.retriever; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; @@ -65,6 +66,11 @@ public TestRetrieverBuilder(String value) { this.value = value; } + @Override + public QueryBuilder topDocsQuery() { + throw new UnsupportedOperationException("only used for parsing tests"); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { throw new 
UnsupportedOperationException("only used for parsing tests"); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java index ab8c85cac00e3..eb36c445506a7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; @@ -25,7 +26,7 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** - * A {@code RetrieverBuilder} for parsing and constructing a text similarity reranker retriever. + * A {@code RetrieverBuilder} for randomly scoring a set of documents using the {@code RandomRankBuilder} */ public class RandomRankRetrieverBuilder extends RetrieverBuilder { @@ -74,6 +75,11 @@ public RandomRankRetrieverBuilder(RetrieverBuilder retrieverBuilder, String fiel this.seed = seed; } + @Override + public QueryBuilder topDocsQuery() { + return retrieverBuilder.topDocsQuery(); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index e1d27e96cc5ff..927c708268a49 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -9,6 +9,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -20,6 +22,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; +import java.util.List; import java.util.Objects; import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; @@ -99,11 +102,61 @@ public TextSimilarityRankRetrieverBuilder( this.minScore = minScore; } + public TextSimilarityRankRetrieverBuilder( + RetrieverBuilder retrieverBuilder, + String inferenceId, + String inferenceText, + String field, + int rankWindowSize, + Float minScore, + String retrieverName, + List preFilterQueryBuilders + ) { + this.retrieverBuilder = retrieverBuilder; + this.inferenceId = inferenceId; + this.inferenceText = inferenceText; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.minScore = minScore; + this.retrieverName = retrieverName; + 
+        this.preFilterQueryBuilders = preFilterQueryBuilders;
+    }
+
+    @Override
+    public QueryBuilder topDocsQuery() {
+        // the original matching set of the TextSimilarityRank retriever is specified by its nested retriever
+        return retrieverBuilder.topDocsQuery();
+    }
+
+    @Override
+    public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException {
+        // rewrite prefilters
+        boolean hasChanged = false;
+        var newPreFilters = rewritePreFilters(ctx);
+        hasChanged |= newPreFilters != preFilterQueryBuilders;
+
+        // rewrite nested retriever
+        RetrieverBuilder newRetriever = retrieverBuilder.rewrite(ctx);
+        hasChanged |= newRetriever != retrieverBuilder;
+        if (hasChanged) {
+            return new TextSimilarityRankRetrieverBuilder(
+                newRetriever,
+                inferenceId,
+                inferenceText,
+                field,
+                rankWindowSize,
+                minScore,
+                this.retrieverName,
+                newPreFilters
+            );
+        }
+        return this;
+    }
+
     @Override
     public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) {
         retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders);
         retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed);
-        // Combining with other rank builder (such as RRF) is not supported yet
 
         if (searchSourceBuilder.rankBuilder() != null) {
             throw new IllegalArgumentException("text similarity rank builder cannot be combined with other rank builders");
@@ -114,6 +167,13 @@ public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder
         );
     }
 
+    /**
+     * Determines if this retriever contains sub-retrievers that need to be executed prior to search.
+     */
+    public boolean isCompound() {
+        return retrieverBuilder.isCompound();
+    }
+
     @Override
     public String getName() {
         return TextSimilarityRankBuilder.NAME;
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java
index 51f240be6fbeb..332e567cae796 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java
@@ -8,10 +8,22 @@
 
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.MatchNoneQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryRewriteContext;
+import org.elasticsearch.index.query.RandomQueryBuilder;
+import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.index.query.Rewriteable;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.builder.SubSearchSourceBuilder;
 import org.elasticsearch.search.retriever.RetrieverBuilder;
 import org.elasticsearch.search.retriever.RetrieverParserContext;
 import org.elasticsearch.search.retriever.TestRetrieverBuilder;
 import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.usage.SearchUsage;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import
org.elasticsearch.xcontent.ParseField; @@ -23,6 +35,10 @@ import java.util.List; import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class TextSimilarityRankRetrieverBuilderTests extends AbstractXContentTestCase { @@ -32,8 +48,17 @@ public class TextSimilarityRankRetrieverBuilderTests extends AbstractXContentTes * for x-content testing. */ public static TextSimilarityRankRetrieverBuilder createRandomTextSimilarityRankRetrieverBuilder() { + return createRandomTextSimilarityRankRetrieverBuilder(TestRetrieverBuilder.createRandomTestRetrieverBuilder()); + } + + /** + * Creates a random {@link TextSimilarityRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static TextSimilarityRankRetrieverBuilder createRandomTextSimilarityRankRetrieverBuilder(RetrieverBuilder innerRetriever) { return new TextSimilarityRankRetrieverBuilder( - TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + innerRetriever, randomAlphaOfLength(10), randomAlphaOfLength(20), randomAlphaOfLength(50), @@ -104,4 +129,122 @@ public void testParserDefaults() throws IOException { } } + public void testRewriteInnerRetriever() throws IOException { + final boolean[] rewritten = { false }; + List preFilterQueryBuilders = new ArrayList<>(); + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 5); i++) { + preFilterQueryBuilders.add(RandomQueryBuilder.createQuery(random())); + } + } + RetrieverBuilder innerRetriever = new TestRetrieverBuilder("top-level-retriever") { + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (randomBoolean()) { + return this; + } + rewritten[0] = true; + return new TestRetrieverBuilder("nested-rewritten-retriever") { + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (preFilterQueryBuilders.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + for (QueryBuilder preFilterQueryBuilder : preFilterQueryBuilders) { + boolQueryBuilder.filter(preFilterQueryBuilder); + } + boolQueryBuilder.must(new RangeQueryBuilder("some_field")); + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(boolQueryBuilder)); + } else { + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(new RangeQueryBuilder("some_field"))); + } + } + }; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (preFilterQueryBuilders.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + for (QueryBuilder preFilterQueryBuilder : preFilterQueryBuilders) { + boolQueryBuilder.filter(preFilterQueryBuilder); + } + boolQueryBuilder.must(new TermQueryBuilder("field", "value")); + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(boolQueryBuilder)); + } else { + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(new TermQueryBuilder("field", "value"))); + } + } + }; + TextSimilarityRankRetrieverBuilder textSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + innerRetriever + ); + textSimilarityRankRetrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); + 
SearchSourceBuilder source = new SearchSourceBuilder().retriever(textSimilarityRankRetrieverBuilder); + QueryRewriteContext queryRewriteContext = mock(QueryRewriteContext.class); + source = Rewriteable.rewrite(source, queryRewriteContext); + assertNull(source.retriever()); + if (false == preFilterQueryBuilders.isEmpty()) { + if (source.query() instanceof MatchAllQueryBuilder == false && source.query() instanceof MatchNoneQueryBuilder == false) { + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + assertFalse(bq.must().isEmpty()); + assertThat(bq.must().size(), equalTo(1)); + if (rewritten[0]) { + assertThat(bq.must().get(0), instanceOf(RangeQueryBuilder.class)); + } else { + assertThat(bq.must().get(0), instanceOf(TermQueryBuilder.class)); + } + for (int j = 0; j < bq.filter().size(); j++) { + assertEqualQueryOrMatchAllNone(bq.filter().get(j), preFilterQueryBuilders.get(j)); + } + } + } else { + if (rewritten[0]) { + assertThat(source.query(), instanceOf(RangeQueryBuilder.class)); + } else { + assertThat(source.query(), instanceOf(TermQueryBuilder.class)); + } + } + } + + public void testIsCompound() { + RetrieverBuilder compoundInnerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public boolean isCompound() { + return true; + } + }; + RetrieverBuilder nonCompoundInnerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public boolean isCompound() { + return false; + } + }; + TextSimilarityRankRetrieverBuilder compoundTextSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + compoundInnerRetriever + ); + assertTrue(compoundTextSimilarityRankRetrieverBuilder.isCompound()); + TextSimilarityRankRetrieverBuilder nonCompoundTextSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + nonCompoundInnerRetriever + ); + assertFalse(nonCompoundTextSimilarityRankRetrieverBuilder.isCompound()); + } + + public void testTopDocsQuery() { + RetrieverBuilder innerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public QueryBuilder topDocsQuery() { + return new TermQueryBuilder("field", "value"); + } + }; + TextSimilarityRankRetrieverBuilder retriever = createRandomTextSimilarityRankRetrieverBuilder(innerRetriever); + assertThat(retriever.topDocsQuery(), instanceOf(TermQueryBuilder.class)); + } + + private static void assertEqualQueryOrMatchAllNone(QueryBuilder actual, QueryBuilder expected) { + assertThat(actual, anyOf(instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class), equalTo(expected))); + } + } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index 530be2341c9c8..e2c1417057578 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -60,6 +60,7 @@ setup: text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." 
topic: [ "geography" ] refresh: true + --- "Simple text similarity rank retriever": diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java index 8f078c0c4d116..4dbc9a6a54dcf 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.Explanation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; @@ -54,6 +56,32 @@ public RRFRankDoc(StreamInput in) throws IOException { scores = in.readFloatArray(); } + @Override + public Explanation explain() { + // ideally we'd need access to the rank constant to provide score info for this one + int queries = positions.length; + Explanation[] details = new Explanation[queries]; + for (int i = 0; i < queries; i++) { + final String queryIndex = "at index [" + i + "]"; + if (positions[i] == RRFRankDoc.NO_RANK) { + final String description = "rrf score: [0], result not found in query " + queryIndex; + details[i] = Explanation.noMatch(description); + } else { + final int rank = positions[i] + 1; + details[i] = Explanation.match(rank, "rank [" + (rank) + "] in query " + queryIndex); + } + } + return Explanation.match( + score, + "rrf score: [" + + score + + "] computed for initial ranks " + + Arrays.toString(Arrays.stream(positions).map(x -> x + 1).toArray()) + + "] as sum of [1 / (rank + rankConstant)] for each query", + details + ); + } + @Override public void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(rank); @@ -96,4 +124,10 @@ public String toString() { public String getWriteableName() { return NAME; } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("positions", positions); + builder.field("scores", scores); + } } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index e5a7983107278..0d6208e474eea 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -74,6 +75,11 @@ public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverP int rankWindowSize = RRFRankBuilder.DEFAULT_RANK_WINDOW_SIZE; int rankConstant = RRFRankBuilder.DEFAULT_RANK_CONSTANT; + @Override + public QueryBuilder topDocsQuery() { + throw new IllegalStateException("{" + getName() + "} cannot be nested"); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { if (compoundUsed) { diff --git 
a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java index 8c0eafe3ab022..0b8ee30fe0680 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java @@ -71,4 +71,9 @@ protected RRFRankDoc mutateInstance(RRFRankDoc instance) throws IOException { } return mutated; } + + public void testExplain() { + RRFRankDoc instance = createTestRRFRankDoc(); + assertEquals(instance.explain().toString(), instance.explain().toString()); + } } From 7563a724f05fab579d79144b2b2ac570bf35bcb0 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Mon, 26 Aug 2024 16:15:31 +0300 Subject: [PATCH 204/389] Updating retriever documentation to better explain how filters are applied (#112201) --- docs/reference/rest-api/common-parms.asciidoc | 16 +++++++--- docs/reference/search/retriever.asciidoc | 23 ++++++++++--- docs/reference/search/rrf.asciidoc | 6 ++-- .../retrievers-overview.asciidoc | 32 +++++++++---------- .../action/search/RankFeaturePhase.java | 6 ++-- .../QueryPhaseRankCoordinatorContext.java | 2 +- .../context/QueryPhaseRankShardContext.java | 2 +- 7 files changed, 53 insertions(+), 34 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e5ab10b7d71ba..fabd495cdc525 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1327,13 +1327,21 @@ that lower ranked documents have more influence. This value must be greater than equal to `1`. Defaults to `60`. end::rrf-rank-constant[] -tag::rrf-window-size[] -`window_size`:: +tag::rrf-rank-window-size[] +`rank_window_size`:: (Optional, integer) + This value determines the size of the individual result sets per query. A higher value will improve result relevance at the cost of performance. The final ranked result set is pruned down to the search request's <>. -`window_size` must be greater than or equal to `size` and greater than or equal to `1`. +`rank_window_size` must be greater than or equal to `size` and greater than or equal to `1`. Defaults to the `size` parameter. -end::rrf-window-size[] +end::rrf-rank-window-size[] + +tag::rrf-filter[] +`filter`:: +(Optional, <>) ++ +Applies the specified <> to all of the specified sub-retrievers, +according to each retriever's specifications. +end::rrf-filter[] diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index b52b296220029..58cc8ce9ef459 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -198,7 +198,7 @@ GET my-embeddings/_search An <> retriever returns top documents based on the RRF formula, equally weighting two or more child retrievers. -Reciprocal rank fusion (RRF) is a method for combining multiple result +Reciprocal rank fusion (RRF) is a method for combining multiple result sets with different relevance indicators into a single result set. 
===== Parameters
 
@@ -207,7 +209,9 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers]
 
 include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant]
 
-include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size]
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size]
+
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-filter]
 
 ===== Restrictions
 
@@ -225,7 +227,7 @@ A simple hybrid search example (lexical search + dense vector search) combining
 ----
 GET /restaurants/_search
 {
-  "retriever": { 
+  "retriever": {
     "rrf": { <1>
       "retrievers": [ <2>
         {
@@ -340,6 +342,10 @@ Currently you can:
 ** Refer to the <> on this page for a step-by-step guide.
 
 ===== Parameters
 
+`retriever`::
+(Required, <>)
++
+The child retriever that generates the initial set of top documents to be re-ranked.
 
 `field`::
 (Required, `string`)
@@ -366,6 +372,13 @@ The number of top documents to consider in the re-ranking process. Defaults to `
 +
 Sets a minimum threshold score for including documents in the re-ranked results. Documents with similarity scores below this threshold will be excluded. Note that score calculations vary depending on the model used.
 
+`filter`::
+(Optional, <>)
++
+Applies the specified <> to the child <>.
+If the child retriever already specifies any filters, then this top-level filter is applied in conjunction
+with the filter defined in the child retriever.
+
 ===== Restrictions
 
 A text similarity re-ranker retriever is a compound retriever. Child retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree.
 
@@ -441,13 +454,13 @@ eland_import_hub_model \
 +
 [source,js]
 ----
-PUT _inference/rerank/my-msmarco-minilm-model 
+PUT _inference/rerank/my-msmarco-minilm-model
 {
   "service": "elasticsearch",
   "service_settings": {
     "num_allocations": 1,
     "num_threads": 1,
-    "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" 
+    "model_id": "cross-encoder__ms-marco-minilm-l-6-v2"
   }
 }
 ----
diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc
index fb474fe6bf4e6..2525dfff23b94 100644
--- a/docs/reference/search/rrf.asciidoc
+++ b/docs/reference/search/rrf.asciidoc
@@ -1,9 +1,7 @@
 [[rrf]]
 === Reciprocal rank fusion
 
-preview::["This functionality is in technical preview and may be changed or removed in a future release.
-The syntax will likely change before GA.
-Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
+preview::["This functionality is in technical preview and may be changed or removed in a future release. The syntax will likely change before GA. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
 
 https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf[Reciprocal rank fusion (RRF)]
 is a method for combining multiple result sets with different relevance indicators into a single result set.
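To make the combination step concrete, the following is a minimal, self-contained sketch of the RRF formula these docs describe: each result set contributes `1 / (rank_constant + rank)` for every document it contains, and per-set contributions are summed. It is an illustration only; the class and method names are hypothetical and not part of this patch.

[source,java]
----
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RrfSketch {
    // Each inner list is one result set, ordered best-first; ranks are 1-based.
    static Map<String, Double> rrfScores(List<List<String>> resultSets, int rankConstant) {
        Map<String, Double> scores = new HashMap<>();
        for (List<String> resultSet : resultSets) {
            for (int rank = 1; rank <= resultSet.size(); rank++) {
                // A document found by several sub-retrievers accumulates one contribution per set.
                scores.merge(resultSet.get(rank - 1), 1.0 / (rankConstant + rank), Double::sum);
            }
        }
        return scores;
    }

    public static void main(String[] args) {
        // "doc-a" appears in both sets, so it outscores the documents found by only one.
        rrfScores(List.of(List.of("doc-a", "doc-b"), List.of("doc-a", "doc-c")), 60)
            .forEach((doc, score) -> System.out.println(doc + " -> " + score));
    }
}
----

With the default `rank_constant` of 60, a rank-1 hit contributes 1/61 ≈ 0.0164, so a document that several sub-retrievers agree on stays ahead of single-set hits even though each individual contribution is small.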
@@ -43,7 +41,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size] An example request using RRF: diff --git a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc index 99659ae76e092..c0fe7471946f3 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc @@ -13,23 +13,23 @@ For implementation details, including notable restrictions, check out the [discrete] [[retrievers-overview-types]] -==== Retriever types +==== Retriever types Retrievers come in various types, each tailored for different search operations. The following retrievers are currently available: -* <>. Returns top documents from a -traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. -Mimics a traditional query but in the context of a retriever framework. This -ensures backward compatibility as existing `_search` requests remain supported. -That way you can transition to the new abstraction at your own pace without +* <>. Returns top documents from a +traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. +Mimics a traditional query but in the context of a retriever framework. This +ensures backward compatibility as existing `_search` requests remain supported. +That way you can transition to the new abstraction at your own pace without mixing syntaxes. -* <>. Returns top documents from a <>, +* <>. Returns top documents from a <>, in the context of a retriever framework. * <>. Combines and ranks multiple first-stage retrievers using -the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets +the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets with different relevance indicators into a single result set. -An RRF retriever is a *compound retriever*, where its `filter` element is +An RRF retriever is a *compound retriever*, where its `filter` element is propagated to its sub retrievers. + Sub retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. @@ -38,7 +38,7 @@ See the <> for detaile Requires first creating a `rerank` task using the <>. [discrete] -==== What makes retrievers useful? +==== What makes retrievers useful? Here's an overview of what makes retrievers useful and how they differ from regular queries. @@ -140,7 +140,7 @@ GET example-index/_search ], "rank":{ "rrf":{ - "window_size":50, + "rank_window_size":50, "rank_constant":20 } } @@ -155,14 +155,14 @@ GET example-index/_search Here are some important terms: -* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to +* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to produce top hits. * *Retriever Tree*. A hierarchical structure that defines how retrievers interact. * *First-stage Retriever*. Returns an initial set of candidate documents. -* *Compound Retriever*. Builds on one or more retrievers, +* *Compound Retriever*. Builds on one or more retrievers, enhancing document retrieval and ranking logic. 
-* *Combiners*. Compound retrievers that merge top hits -from multiple sub-retrievers. +* *Combiners*. Compound retrievers that merge top hits +from multiple sub-retrievers. * *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers. [discrete] @@ -180,4 +180,4 @@ Refer to the {kibana-ref}/playground.html[Playground documentation] for more inf [[retrievers-overview-api-reference]] ==== API reference -For implementation details, including notable restrictions, check out the <> in the Search API docs. \ No newline at end of file +For implementation details, including notable restrictions, check out the <> in the Search API docs. diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 5b42afcb86928..0f7cbd65a63c2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -28,8 +28,8 @@ /** * This search phase is responsible for executing any re-ranking needed for the given search request, iff that is applicable. - * It starts by retrieving {@code num_shards * window_size} results from the query phase and reduces them to a global list of - * the top {@code window_size} results. It then reaches out to the shards to extract the needed feature data, + * It starts by retrieving {@code num_shards * rank_window_size} results from the query phase and reduces them to a global list of + * the top {@code rank_window_size} results. It then reaches out to the shards to extract the needed feature data, * and finally passes all this information to the appropriate {@code RankFeatureRankCoordinatorContext} which is responsible for reranking * the results. If no rank query is specified, it proceeds directly to the next phase (FetchSearchPhase) by first reducing the results. */ @@ -88,7 +88,7 @@ public void onFailure(Exception e) { void innerRun() throws Exception { // if the RankBuilder specifies a QueryPhaseCoordinatorContext, it will be called as part of the reduce call - // to operate on the first `window_size * num_shards` results and merge them appropriately. + // to operate on the first `rank_window_size * num_shards` results and merge them appropriately. SearchPhaseController.ReducedQueryPhase reducedQueryPhase = queryPhaseResults.reduce(); RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext = coordinatorContext(context.getRequest().source()); if (rankFeaturePhaseRankCoordinatorContext != null) { diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java index 1be8544758a8f..4d1c9c6785453 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java @@ -17,7 +17,7 @@ /** * {@link QueryPhaseRankCoordinatorContext} is running on the coordinator node and is * responsible for combining the query phase results from the shards and rank them accordingly. - * The output is a `window_size` ranked list of ordered results from all shards. + * The output is a `rank_window_size` ranked list of ordered results from all shards. 
* Note: Currently this can use only sort by score; sort by field is not supported. */ public abstract class QueryPhaseRankCoordinatorContext { diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java index f562977afb857..fa413485797e8 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java @@ -15,7 +15,7 @@ import java.util.List; /** - * {@link QueryPhaseRankShardContext} is used to generate the top {@code window_size} + * {@link QueryPhaseRankShardContext} is used to generate the top {@code rank_window_size} * results on each shard. It specifies the queries to run during {@code QueryPhase} and is responsible for combining all query scores and * order all results through the {@link QueryPhaseRankShardContext#combineQueryPhaseResults} method. */ From 0d371978e888cf7c7186f6f04223f6cdae88d4fa Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Mon, 26 Aug 2024 09:53:26 -0400 Subject: [PATCH 205/389] Search coordinator uses event.ingested in cluster state to do rewrites (#111523) * Search coordinator uses event.ingested in cluster state to do rewrites Min/max range for the event.ingested timestamp field (part of Elastic Common Schema) was added to IndexMetadata in cluster state for searchable snapshots in #106252. This commit modifies the search coordinator to rewrite searches to MatchNone if the query searches a range of event.ingested that, from the min/max range in cluster state, is known to not overlap. This is the same behavior we currently have for the @timestamp field. 
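The skip decision this commit message describes reduces to an interval-overlap test between the query's bounds and the field's min/max kept in cluster state. The sketch below is illustrative only, with hypothetical names and simplified inclusive millisecond bounds; it is not the actual `CoordinatorRewriteContext` logic.

[source,java]
----
public class CoordinatorSkipSketch {
    // indexMin/indexMax stand in for the IndexLongFieldRange stored in cluster state;
    // queryFrom/queryTo are the (inclusive) bounds of the range query, all in epoch millis.
    static boolean canRewriteToMatchNone(long indexMin, long indexMax, long queryFrom, long queryTo) {
        // No overlap means no shard of this index can match, so the coordinator
        // can rewrite the query to match-none and skip those shards outright.
        return queryTo < indexMin || queryFrom > indexMax;
    }

    public static void main(String[] args) {
        // Index covers [1_000, 2_000]: a query over [3_000, 4_000] is skippable...
        System.out.println(canRewriteToMatchNone(1_000L, 2_000L, 3_000L, 4_000L)); // true
        // ...while a query over [1_500, 4_000] still has to run.
        System.out.println(canRewriteToMatchNone(1_000L, 2_000L, 1_500L, 4_000L)); // false
    }
}
----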
--- docs/changelog/111523.yaml | 5 + .../TimestampFieldMapperServiceTests.java | 4 +- .../query/CoordinatorRewriteContext.java | 61 ++- .../CoordinatorRewriteContextProvider.java | 27 +- .../index/query/RangeQueryBuilder.java | 18 +- .../indices/DateFieldRangeInfo.java | 28 ++ .../elasticsearch/indices/IndicesService.java | 19 +- .../indices/TimestampFieldMapperService.java | 54 ++- .../CanMatchPreFilterSearchPhaseTests.java | 344 ++++++++++++--- .../test/AbstractBuilderTestCase.java | 11 +- .../index/engine/frozen/FrozenIndexIT.java | 163 ++++++- ...pshotsCanMatchOnCoordinatorIntegTests.java | 409 ++++++++++++++++-- 12 files changed, 962 insertions(+), 181 deletions(-) create mode 100644 docs/changelog/111523.yaml create mode 100644 server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml new file mode 100644 index 0000000000000..202d16c5a426d --- /dev/null +++ b/docs/changelog/111523.yaml @@ -0,0 +1,5 @@ +pr: 111523 +summary: Search coordinator uses `event.ingested` in cluster state to do rewrites +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 97959fa385241..eb35c44d30331 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index ac6512b0839e6..7cb1b04972bfa 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,11 +9,13 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; @@ -23,19 +25,24 @@ * Context object used to 
rewrite {@link QueryBuilder} instances into a simplified version in the coordinator. * Instances of this object rely on information stored in the {@code IndexMetadata} for certain indices. * Right now this context object is able to rewrite range queries that include a known timestamp field - * (i.e. the timestamp field for DataStreams) into a MatchNoneQueryBuilder and skip the shards that - * don't hold queried data. See IndexMetadata#getTimestampRange() for more details + * (i.e. the timestamp field for DataStreams or the 'event.ingested' field in ECS) into a MatchNoneQueryBuilder + * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { - private final IndexLongFieldRange indexLongFieldRange; - private final DateFieldMapper.DateFieldType timestampFieldType; + private final DateFieldRangeInfo dateFieldRangeInfo; + /** + * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field. + * @param parserConfig the parser configuration for this context + * @param client the client used by the rewrite context + * @param nowInMillis supplier of the current time in milliseconds + * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - IndexLongFieldRange indexLongFieldRange, - DateFieldMapper.DateFieldType timestampFieldType + DateFieldRangeInfo dateFieldRangeInfo ) { super( parserConfig, @@ -54,29 +61,37 @@ public CoordinatorRewriteContext( null, null ); - this.indexLongFieldRange = indexLongFieldRange; - this.timestampFieldType = timestampFieldType; - } - - long getMinTimestamp() { - return indexLongFieldRange.getMin(); - } - - long getMaxTimestamp() { - return indexLongFieldRange.getMax(); - } - - boolean hasTimestampData() { - return indexLongFieldRange.isComplete() && indexLongFieldRange != IndexLongFieldRange.EMPTY; + this.dateFieldRangeInfo = dateFieldRangeInfo; } + /** + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return MappedFieldType for the field. Returns null if fieldName is not one of the allowed field names. + */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (fieldName.equals(timestampFieldType.name()) == false) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.timestampFieldType(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.eventIngestedFieldType(); + } else { return null; } + } - return timestampFieldType; + /** + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return IndexLongFieldRange with min/max ranges for the field. Returns null if fieldName is not one of the allowed field names.
+ */ + @Nullable + public IndexLongFieldRange getFieldRange(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.timestampRange(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.eventIngestedRange(); + } else { + return null; + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index e44861b4afe8a..ec53dfe5c0d05 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Function; @@ -25,14 +26,14 @@ public class CoordinatorRewriteContextProvider { private final Client client; private final LongSupplier nowInMillis; private final Supplier<ClusterState> clusterStateSupplier; - private final Function<Index, DateFieldMapper.DateFieldType> mappingSupplier; + private final Function<Index, DateFieldRangeInfo> mappingSupplier; public CoordinatorRewriteContextProvider( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, Supplier<ClusterState> clusterStateSupplier, - Function<Index, DateFieldMapper.DateFieldType> mappingSupplier + Function<Index, DateFieldRangeInfo> mappingSupplier ) { this.parserConfig = parserConfig; this.client = client; @@ -49,18 +50,30 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { if (indexMetadata == null) { return null; } - DateFieldMapper.DateFieldType dateFieldType = mappingSupplier.apply(index); - if (dateFieldType == null) { + DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + if (dateFieldRangeInfo == null) { return null; } + DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.timestampFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + if (timestampRange.containsAllShardRanges() == false) { - timestampRange = indexMetadata.getTimeSeriesTimestampRange(dateFieldType); - if (timestampRange == null) { + // if @timestamp range is not present or not ready in cluster state, fall back to using the time series range (if present) + timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); + // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) + if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { return null; } } - return new CoordinatorRewriteContext(parserConfig, client, nowInMillis, timestampRange, dateFieldType); + // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges + // so create a new object with ranges pulled from cluster state + return new CoordinatorRewriteContext( + parserConfig, + client, + nowInMillis, + new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange) + ); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 4d2a6d3eaecdb..8b154b3845964 100644 ---
a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -436,15 +437,22 @@ public String getWriteableName() { protected MappedFieldType.Relation getRelation(final CoordinatorRewriteContext coordinatorRewriteContext) { final MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(fieldName); if (fieldType instanceof final DateFieldMapper.DateFieldType dateFieldType) { - if (coordinatorRewriteContext.hasTimestampData() == false) { + IndexLongFieldRange fieldRange = coordinatorRewriteContext.getFieldRange(fieldName); + if (fieldRange.isComplete() == false || fieldRange == IndexLongFieldRange.EMPTY) { + // if not all shards for this (frozen) index have reported ranges to cluster state, OR if they + // have reported in and the range is empty (no data for that field), then return DISJOINT in order + // to rewrite the query to MatchNone return MappedFieldType.Relation.DISJOINT; } - long minTimestamp = coordinatorRewriteContext.getMinTimestamp(); - long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(); + if (fieldRange == IndexLongFieldRange.UNKNOWN) { + // do a full search if the range is UNKNOWN for whatever reason (e.g., event.ingested is UNKNOWN in a + // mixed-version cluster where some nodes predate the addition of event.ingested to cluster state) + return MappedFieldType.Relation.INTERSECTS; + } DateMathParser dateMathParser = getForceDateParser(); return dateFieldType.isFieldWithinQuery( - minTimestamp, - maxTimestamp, + fieldRange.getMin(), + fieldRange.getMax(), from, to, includeLower, diff --git a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java new file mode 100644 index 0000000000000..b631806e3ce95 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.shard.IndexLongFieldRange; + +/** + * Data holder of timestamp fields held in cluster state IndexMetadata.
+ * @param timestampFieldType field type for the @timestamp field + * @param timestampRange min/max range for the @timestamp field (in a specific index) + * @param eventIngestedFieldType field type for the 'event.ingested' field + * @param eventIngestedRange min/max range for the 'event.ingested' field (in a specific index) + */ +public record DateFieldRangeInfo( + DateFieldMapper.DateFieldType timestampFieldType, + IndexLongFieldRange timestampRange, + DateFieldMapper.DateFieldType eventIngestedFieldType, + IndexLongFieldRange eventIngestedRange +) { + +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 82a5c96bb7dc2..decc082d314e6 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; @@ -1766,7 +1765,13 @@ public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { } public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { - return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); + return new CoordinatorRewriteContextProvider( + parserConfig, + client, + nowInMillis, + clusterService::state, + this::getTimestampFieldTypeInfo + ); } /** @@ -1856,14 +1861,16 @@ public boolean allPendingDanglingIndicesWritten() { } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index, + * or {@code null} if: * - the index is not found, * - the field is not found, - * - the field is not a timestamp field. + * - the mapping is not known yet, or + * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - return timestampFieldMapperService.getTimestampFieldType(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + return timestampFieldMapperService.getTimestampFieldTypeInfo(index); } public IndexScopedSettings getIndexScopedSettings() { diff --git a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java index 4caeaef6514e5..b139fca5c2acc 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java @@ -42,8 +42,9 @@ import static org.elasticsearch.core.Strings.format; /** - * Tracks the mapping of the {@code @timestamp} field of immutable indices that expose their timestamp range in their index metadata. - * Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of this one field from the mapping here.
+ * Tracks the mapping of the '@timestamp' and 'event.ingested' fields of immutable indices that expose their timestamp range in their + * index metadata. Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of these two fields + * from the mapping here, since timestamp fields can have millis or nanos level resolution. */ public class TimestampFieldMapperService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -53,10 +54,12 @@ public class TimestampFieldMapperService extends AbstractLifecycleComponent impl private final ExecutorService executor; // single thread to construct mapper services async as needed /** - * The type of the {@code @timestamp} field keyed by index. Futures may be completed with {@code null} to indicate that there is - * no usable {@code @timestamp} field. + * The types of the '@timestamp' and 'event.ingested' fields keyed by index, bundled in a {@code DateFieldRangeInfo}. + * Futures may be completed with {@code null} to indicate that there is + * no usable timestamp field. */ - private final Map<Index, PlainActionFuture<DateFieldMapper.DateFieldType>> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); + private final Map<Index, PlainActionFuture<DateFieldRangeInfo>> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); public TimestampFieldMapperService(Settings settings, ThreadPool threadPool, IndicesService indicesService) { this.indicesService = indicesService; @@ -103,7 +106,7 @@ public void applyClusterState(ClusterChangedEvent event) { if (hasUsefulTimestampField(indexMetadata) && fieldTypesByIndex.containsKey(index) == false) { logger.trace("computing timestamp mapping for {}", index); - final PlainActionFuture<DateFieldMapper.DateFieldType> future = new PlainActionFuture<>(); + final PlainActionFuture<DateFieldRangeInfo> future = new PlainActionFuture<>(); fieldTypesByIndex.put(index, future); final IndexService indexService = indicesService.indexService(index); @@ -148,29 +151,45 @@ private static boolean hasUsefulTimestampField(IndexMetadata indexMetadata) { return true; } - final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); - return timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN; + IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + if (timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN) { + return true; + } + + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + return eventIngestedRange.isComplete() && eventIngestedRange != IndexLongFieldRange.UNKNOWN; } - private static DateFieldMapper.DateFieldType fromMapperService(MapperService mapperService) { - final MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); - if (mappedFieldType instanceof DateFieldMapper.DateFieldType) { - return (DateFieldMapper.DateFieldType) mappedFieldType; - } else { + private static DateFieldRangeInfo fromMapperService(MapperService mapperService) { + DateFieldMapper.DateFieldType timestampFieldType = null; + DateFieldMapper.DateFieldType eventIngestedFieldType = null; + + MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + timestampFieldType = dateFieldType; + } + mappedFieldType = mapperService.fieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + eventIngestedFieldType = dateFieldType; + } + if (timestampFieldType == null &&
eventIngestedFieldType == null) { return null; } + // the mapper only fills in the field types, not the actual range values + return new DateFieldRangeInfo(timestampFieldType, null, eventIngestedFieldType, null); } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index, + * or {@code null} if: * - the index is not found, * - the field is not found, * - the mapping is not known yet, or - * - the field is not a timestamp field. + * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - final PlainActionFuture<DateFieldMapper.DateFieldType> future = fieldTypesByIndex.get(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + final PlainActionFuture<DateFieldRangeInfo> future = fieldTypesByIndex.get(index); if (future == null || future.isDone() == false) { return null; } @@ -181,5 +200,4 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { throw new UncategorizedExecutionException("An error occurred fetching timestamp field type for " + index, e); } } - } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 70c4d73f578b3..c450fd8a9c39c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.CanMatchNodeResponse.ResponseOrFailure; @@ -26,8 +27,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -38,6 +37,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -72,6 +72,7 @@ import static org.elasticsearch.action.search.SearchAsyncActionTests.getShardsIter; import static org.elasticsearch.core.Types.forciblyCast; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; @@ -464,7 +465,17 @@ public void sendCanMatch( } } - public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exception { + // test using @timestamp + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingTimestamp() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(DataStream.TIMESTAMP_FIELD_NAME); + } + + // test using event.ingested
+ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + } + + public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); @@ -475,15 +486,10 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timestampField, indexMinTimestamp, indexMaxTimestamp); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timestampField); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -516,12 +522,12 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio // When all the shards can be skipped we should query at least 1 // in order to get a valid search response. if (regularIndexShardCount == 0) { assertThat(nonSkippedShards.size(), equalTo(1)); } else { boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); } boolean allSkippedShardAreFromDataStream = skippedShards.stream() @@ -535,26 +541,107 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio ); } - public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { - Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + public void testCoordinatorCanMatchFilteringThatCanBeSkippedUsingBothTimestamps() throws Exception { + Index dataStreamIndex1 = new Index(".ds-twoTimestamps0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-twoTimestamps0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - List<Index> regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + List<Index> regularIndices = randomList(1, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); long indexMinTimestamp = randomLongBetween(0, 5000); long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for
(Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( + // use same range for both @timestamp and event.ingested + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp, indexMinTimestamp, indexMaxTimestamp ); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + /** + * Expected behavior: if either @timestamp or 'event.ingested' filters in the query are "out of range" (do not + * overlap the range in cluster state), then all shards in the datastream should be skipped. + * Only if both @timestamp and 'event.ingested' filters are "in range" should the data stream shards be searched. + */ + boolean timestampQueryOutOfRange = randomBoolean(); + boolean eventIngestedQueryOutOfRange = randomBoolean(); + int timestampOffset = timestampQueryOutOfRange ? 1 : -500; + int eventIngestedOffset = eventIngestedQueryOutOfRange ? 1 : -500; + + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMaxTimestamp + timestampOffset).to(indexMaxTimestamp + 2); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMaxTimestamp + eventIngestedOffset).to(indexMaxTimestamp + 2); + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + (updatedSearchShardIterators, requests) -> { + List<SearchShardIterator> skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); + List<SearchShardIterator> nonSkippedShards = updatedSearchShardIterators.stream() + .filter(searchShardIterator -> searchShardIterator.skip() == false) + .toList(); + + if (timestampQueryOutOfRange || eventIngestedQueryOutOfRange) { + // data stream shards should have been skipped + assertThat(skippedShards.size(), greaterThan(0)); + boolean allSkippedShardAreFromDataStream = skippedShards.stream() + .allMatch(shardIterator -> dataStream.getIndices().contains(shardIterator.shardId().getIndex())); + assertThat(allSkippedShardAreFromDataStream, equalTo(true)); + + boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() + .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); + + boolean allRequestsWereTriggeredAgainstRegularIndices = requests.stream() + .allMatch(request -> regularIndices.contains(request.shardId().getIndex())); + assertThat(allRequestsWereTriggeredAgainstRegularIndices, equalTo(true)); + + } else { + assertThat(skippedShards.size(), equalTo(0)); + long countNonSkippedShardsFromDatastream = nonSkippedShards.stream() + .filter(iter -> dataStream.getIndices().contains(iter.shardId().getIndex())) + .count(); + assertThat(countNonSkippedShardsFromDatastream, greaterThan(0L)); + } + } + ); + } + + public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { + Index dataStreamIndex1 = new
Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + long indexMinTimestamp = randomLongBetween(0, 5000); + long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + } + + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // Query with a non default date format rangeQueryBuilder.from("2020-1-01").to("2021-1-01"); @@ -585,23 +672,20 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + long indexMinTimestamp = 10; long indexMaxTimestamp = 20; StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); } BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); // Query inside of the data stream index range if (randomBoolean()) { // Query generation - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // We query a range within the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMinTimestamp).to(indexMaxTimestamp); @@ -614,8 +698,7 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep } } else { // We query a range outside of the timestamp range covered by both datastream indices - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestamp + 1) - .to(indexMaxTimestamp + 2); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField).from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); @@ -635,17 +718,86 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep ); } + public void testCanMatchFilteringOnCoordinatorWithTimestampAndEventIngestedThatCanNotBeSkipped() throws Exception { + // Generate indices + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + long indexMinTimestampForTs = 
10; + long indexMaxTimestampForTs = 20; + long indexMinTimestampForEventIngested = 10; + long indexMaxTimestampForEventIngested = 20; + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( + dataStreamIndex, + indexMinTimestampForTs, + indexMaxTimestampForTs, + indexMinTimestampForEventIngested, + indexMaxTimestampForEventIngested + ); + } + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); + // Query inside of the data stream index range + if (randomBoolean()) { + // Query generation + // We query a range within both timestamp ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMinTimestampForTs).to(indexMaxTimestampForTs); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMinTimestampForEventIngested).to(indexMaxTimestampForEventIngested); + + queryBuilder.filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + } else { + // We query a range outside of both ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestampForTs + 1) + .to(indexMaxTimestampForTs + 2); + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME).from( + indexMaxTimestampForEventIngested + 1 + ).to(indexMaxTimestampForEventIngested + 2); + + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); + + // This is always evaluated as true in the coordinator, which cannot determine whether + // the term query clause is false.
+ queryBuilder.should(tsRangeQueryBuilder).should(eventIngestedRangeQueryBuilder).should(termQueryBuilder); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + this::assertAllShardsAreQueried + ); + } + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withDefaultBackgroundFilter() throws Exception { Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timeField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timeField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timeField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timeField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms"); assignShardsAndExecuteCanMatchPhase( @@ -661,20 +813,22 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w } public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withBackgroundFilter() throws Exception { + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); Index index4 = new Index("index4", UUIDs.base64UUID()); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - contextProviderBuilder.addIndexMinMaxTimestamps(index4, DataStream.TIMESTAMP_FIELD_NAME, 3000, 3999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index4, timestampField, 3000, 3999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(3100).to(3200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(3100).to(3200)); AggregationBuilder aggregation = new 
SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(0).to(1999) + new RangeQueryBuilder(timestampField).from(0).to(1999) ); assignShardsAndExecuteCanMatchPhase( @@ -703,14 +857,53 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(2100).to(2200)); + AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( + new RangeQueryBuilder(timestampField).from(2000).to(2300) + ); + SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); + + assignShardsAndExecuteCanMatchPhase( + List.of(), + List.of(index1, index2, index3), + contextProviderBuilder.build(), + query, + List.of(aggregation), + suggest, + // The query and aggregation and match only index3, but suggest should match everything. + this::assertAllShardsAreQueried + ); + } + + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withSuggest_withTwoTimestamps() throws Exception { + Index index1 = new Index("index1", UUIDs.base64UUID()); + Index index2 = new Index("index2", UUIDs.base64UUID()); + Index index3 = new Index("index3", UUIDs.base64UUID()); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index1, 0, 999, 0, 999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index2, 1000, 1999, 1000, 1999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index3, 2000, 2999, 2000, 2999); + + String fieldInRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + String fieldOutOfRange = DataStream.TIMESTAMP_FIELD_NAME; - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + if (randomBoolean()) { + fieldInRange = DataStream.TIMESTAMP_FIELD_NAME; + fieldOutOfRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + } + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(fieldInRange).from(2100).to(2200)) + .filter(new RangeQueryBuilder(fieldOutOfRange).from(8888).to(9999)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2000).to(2300) + new RangeQueryBuilder(fieldInRange).from(2000).to(2300) ); SuggestBuilder suggest = new 
SuggestBuilder().setGlobalText("test"); @@ -744,13 +937,13 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedTsdb() throws Exce long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index index : dataStream1.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(index, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps(index, DataStream.TIMESTAMP_FIELD_NAME, indexMinTimestamp, indexMaxTimestamp); } for (Index index : dataStream2.getIndices()) { contextProviderBuilder.addIndex(index); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp"); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -954,9 +1147,9 @@ public void sendCanMatch( canMatchResultsConsumer.accept(updatedSearchShardIterators, requests); } - private static class StaticCoordinatorRewriteContextProviderBuilder { + static class StaticCoordinatorRewriteContextProviderBuilder { private ClusterState clusterState = ClusterState.EMPTY_STATE; - private final Map fields = new HashMap<>(); + private final Map fields = new HashMap<>(); private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTimeStamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { @@ -974,35 +1167,64 @@ private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTim IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0) - .timestampRange(timestampRange); + .numberOfReplicas(0); + if (fieldName.equals(DataStream.TIMESTAMP_FIELD_NAME)) { + indexMetadataBuilder.timestampRange(timestampRange); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(fieldName), null, null, null)); + } else if (fieldName.equals(IndexMetadata.EVENT_INGESTED_FIELD_NAME)) { + indexMetadataBuilder.eventIngestedRange(timestampRange, TransportVersion.current()); + fields.put(index, new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(fieldName), null)); + } Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); - clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - - fields.put(index, new DateFieldMapper.DateFieldType(fieldName)); } - private void addIndexMinMaxTimestamps(Index index, long minTimestamp, long maxTimestamp) { + /** + * Add min/max timestamps to IndexMetadata for the specified index for both @timestamp and 'event.ingested' + */ + private void addIndexMinMaxForTimestampAndEventIngested( + Index index, + long minTimestampForTs, + long maxTimestampForTs, + long minTimestampForEventIngested, + long maxTimestampForEventIngested + ) { if (clusterState.metadata().index(index) != null) { throw new IllegalArgumentException("Min/Max timestamps for " + index + " were already defined"); } - Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "a_field") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), 
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(minTimestamp)) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(maxTimestamp)); + IndexLongFieldRange tsTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForTs, maxTimestampForTs) + ); + IndexLongFieldRange eventIngestedTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForEventIngested, maxTimestampForEventIngested) + ); + + Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0); + .numberOfReplicas(0) + .timestampRange(tsTimestampRange) + .eventIngestedRange(eventIngestedTimestampRange, TransportVersion.current()); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put( + index, + new DateFieldRangeInfo( + new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), + null, + new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), + null + ) + ); } private void addIndex(Index index) { @@ -1018,7 +1240,7 @@ private void addIndex(Index index) { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), null, null, null)); } public CoordinatorRewriteContextProvider build() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 2a3cc3a248f45..1634572e0b6b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -623,13 +624,13 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { - return new CoordinatorRewriteContext( - parserConfiguration, - this.client, - () -> nowInMillis, + DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( + dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), - dateFieldType + dateFieldType, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); } DataRewriteContext 
createDataContext() { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 36d4751423113..ad9900b5b0164 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; @@ -44,6 +45,7 @@ import java.time.Instant; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; @@ -76,8 +78,15 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") - .get(); + String timestampVal = "2010-01-06T02:03:04.567Z"; + String eventIngestedVal = "2010-01-06T02:03:05.567Z"; // one second later + + final DocWriteResponse indexResponse = prepareIndex("index").setSource( + DataStream.TIMESTAMP_FIELD_NAME, + timestampVal, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedVal + ).get(); ensureGreen("index"); @@ -117,13 +126,23 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); - assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); - assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); - assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() + .get() + .getState() + .metadata() + .index("index") + .getEventIngestedRange(); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedFieldRange.isComplete()); + assertThat(eventIngestedFieldRange.getMin(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); + assertThat(eventIngestedFieldRange.getMax(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); } - public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception { + public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { internalCluster().startNodes(between(2, 4)); final String locale; @@ -181,11 +200,11 @@ public void 
testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ensureGreen("index"); if (randomBoolean()) { - prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date, IndexMetadata.EVENT_INGESTED_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } assertAcked( @@ -193,15 +212,129 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - final PlainActionFuture<DateFieldMapper.DateFieldType> timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture<Map<String, DateFieldMapper.DateFieldType>> future = new PlainActionFuture<>(); assertBusy(() -> { - final DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(index); + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.timestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.eventIngestedFieldType(); + assertNotNull(eventIngestedFieldType); assertNotNull(timestampFieldType); - timestampFieldTypeFuture.onResponse(timestampFieldType); + future.onResponse( + Map.of( + DataStream.TIMESTAMP_FIELD_NAME, + timestampFieldType, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedFieldType + ) + ); + }); + assertTrue(future.isDone()); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertThat(future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat( + future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().parseMillis(date), + equalTo(1580817683000L) + ); + } + + assertAcked( + client().execute( + FreezeIndexAction.INSTANCE, + new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index").setFreeze(false) + ).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + } + + public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { + internalCluster().startNodes(between(2, 4)); + + final String locale; + final String date; + + switch (between(1, 3)) { + case 1 -> { + locale = ""; + date = "04 Feb 2020 12:01:23Z"; + } + case 2 -> { + locale = "en_GB"; + date = "04 Feb 2020 12:01:23Z"; + } + case 3 -> { + locale = "fr_FR"; + date = "04 févr.
2020 12:01:23Z"; + } + default -> throw new AssertionError("impossible"); + } + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + assertAcked( + prepareCreate("index").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject(timeField) + .field("type", "date") + .field("format", "dd LLL yyyy HH:mm:ssX") + .field("locale", locale) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + + final Index index = clusterAdmin().prepareState() + .clear() + .setIndices("index") + .setMetadata(true) + .get() + .getState() + .metadata() + .index("index") + .getIndex(); + + ensureGreen("index"); + if (randomBoolean()) { + prepareIndex("index").setSource(timeField, date).get(); + } + + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + // final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); + assertBusy(() -> { + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.timestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.eventIngestedFieldType(); + if (timeField == DataStream.TIMESTAMP_FIELD_NAME) { + assertNotNull(timestampFieldType); + assertNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, timestampFieldType)); + } else { + assertNull(timestampFieldType); + assertNotNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, eventIngestedFieldType)); + } }); - assertTrue(timestampFieldTypeFuture.isDone()); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertTrue(future.isDone()); + assertThat(future.get().get(timeField).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(timeField).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); } assertAcked( @@ -212,7 +345,7 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 5204bdfcc78e6..d5e87558d1ced 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; @@ -100,11 +101,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + createIndexWithTimestampAndEventIngested(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -117,11 +118,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Either add data outside of the range, or documents that don't have timestamp data final boolean indexDataWithTimestamp = randomBoolean(); // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be - // blocked + // otherwise the mount operation might go through as the read won't be blocked final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); if (indexDataWithTimestamp) { - indexDocumentsWithTimestampWithinDate( + indexDocumentsWithTimestampAndEventIngestedDates( indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Index enough documents to ensure that all shards have at least some documents int numDocsWithinRange = between(100, 1000); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -166,9 +166,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + 
assertThat(timestampFieldTypeInfo, nullValue()); final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean(); List indicesToSearch = new ArrayList<>(); @@ -176,7 +177,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying indicesToSearch.add(indexWithinSearchRange); } indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -250,20 +253,44 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + + // check that @timestamp and 'event.ingested' are now in cluster state final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + final DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.timestampFieldType(); + assertThat(timestampDataFieldType, notNullValue()); + final DateFieldMapper.DateFieldType eventIngestedDataFieldType = timestampFieldTypeInfo.eventIngestedFieldType(); + assertThat(eventIngestedDataFieldType, notNullValue()); + + final DateFieldMapper.Resolution timestampResolution = timestampDataFieldType.resolution(); + final DateFieldMapper.Resolution eventIngestedResolution = eventIngestedDataFieldType.resolution(); if (indexDataWithTimestamp) { assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertThat( updatedTimestampMillisRange.getMin(), - greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) ); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } else { assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY)); + assertThat(updatedEventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } // Stop the node holding the searchable snapshots, and since we defined 
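// A minimal sketch of the coordinator-side skip decision that the range assertions
// above set up and the searches in the following hunks exercise. It assumes only the
// IndexLongFieldRange methods already used in this test (isComplete(), getMin(),
// getMax() and the EMPTY sentinel); the helper name canSkipIndex and the millisecond
// parameters are hypothetical, not part of this patch:
static boolean canSkipIndex(IndexLongFieldRange recorded, long queryMinMillis, long queryMaxMillis) {
    if (recorded.isComplete() == false) {
        return false; // bounds unknown for at least one shard, so the index must be searched
    }
    if (recorded == IndexLongFieldRange.EMPTY) {
        return true; // the field holds no values at all, so a range query cannot match
    }
    // skip only when the recorded range lies entirely outside the requested window
    return recorded.getMax() < queryMinMillis || recorded.getMin() > queryMaxMillis;
}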
@@ -383,6 +410,171 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } + /** + * Test shard skipping when only 'event.ingested' is in the index and cluster state. + */ + public void testEventIngestedRangeInSearchAgainstSearchableSnapshotShards() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot); + + final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); + + final String timestampField = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + + createIndexWithOnlyOneTimestampField(timestampField, indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + + final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); + createIndexWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + indexWithinSearchRangeShardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + + final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount; + + // Add enough documents to have non-metadata segment files in all shards, + // otherwise the mount operation might go through as the read won't be blocked + final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); + + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexOutsideSearchRange, + numberOfDocsInIndexOutsideSearchRange, + TIMESTAMP_TEMPLATE_OUTSIDE_RANGE + ); + + // Index enough documents to ensure that all shards have at least some documents + int numDocsWithinRange = between(100, 1000); + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + numDocsWithinRange, + TIMESTAMP_TEMPLATE_WITHIN_RANGE + ); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange)); + + final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + // Block the repository for the node holding the searchable snapshot shards + // to delay its restore + blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); + + // Force the searchable snapshot to be allocated in a particular node + Settings restoredIndexSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) + .build(); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + searchableSnapshotIndexOutsideSearchRange, + repositoryName, + snapshotId.getName(), + indexOutsideSearchRange, + restoredIndexSettings, + Strings.EMPTY_ARRAY, + false, + randomFrom(MountSearchableSnapshotRequest.Storage.values()) + ); + 
client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + + // Allow the searchable snapshots to be finally mounted + unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); + waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange); + ensureGreen(searchableSnapshotIndexOutsideSearchRange); + + IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + + // @timestamp range should be null since it was not included in the index or indexed docs + assertThat(updatedTimestampMillisRange, equalTo(IndexLongFieldRange.UNKNOWN)); + assertThat(updatedEventIngestedMillisRange, not(equalTo(IndexLongFieldRange.UNKNOWN))); + + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + + DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.timestampFieldType(); + assertThat(timestampDataFieldType, nullValue()); + + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampFieldTypeInfo.eventIngestedFieldType(); + assertThat(eventIngestedFieldType, notNullValue()); + + DateFieldMapper.Resolution eventIngestedResolution = eventIngestedFieldType.resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + // now do a search against event.ingested + List indicesToSearch = new ArrayList<>(); + indicesToSearch.add(indexWithinSearchRange); + indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); + + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2020-11-28T00:00:00.000000000Z", true) + .to("2020-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // All the searchable snapshots shards were skipped + assertThat(searchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchShardResult.notSkipped().size(), equalTo(indexWithinSearchRangeShardCount)); + } + + // query a range that covers both indexes - all shards should be searched, none skipped + { + RangeQueryBuilder rangeQuery = 
QueryBuilders.rangeQuery(timestampField) + .from("2019-11-28T00:00:00.000000000Z", true) + .to("2021-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(0)); + assertThat(searchShardResult.notSkipped().size(), equalTo(totalShards)); + } + } + /** * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. @@ -396,7 +588,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder() @@ -404,7 +596,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() .build() ); - indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -438,11 +630,14 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + final String timestampField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -500,14 +695,29 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - 
assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); - assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); - assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + + final IndexLongFieldRange updatedTimestampRange = updatedIndexMetadata.getTimestampRange(); + DateFieldMapper.Resolution tsResolution = timestampFieldTypeInfo.timestampFieldType().resolution(); + ; + assertThat(updatedTimestampRange.isComplete(), equalTo(true)); + assertThat(updatedTimestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat(updatedTimestampRange.getMin(), greaterThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(updatedTimestampRange.getMax(), lessThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.eventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -579,7 +789,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -587,7 +797,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo .build() ); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -621,11 +831,13 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + 
DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -680,13 +892,32 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + final DateFieldMapper.Resolution timestampResolution = timestampFieldTypeInfo.timestampFieldType().resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z")))); + assertThat( + updatedTimestampMillisRange.getMin(), + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); + + final IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + final DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.eventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -724,17 +955,24 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } - private void createIndexWithTimestamp(String indexName, int numShards, Settings extraSettings) throws IOException { + private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") + .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", randomFrom("date", "date_nanos")) .field("format", "strict_date_optional_time_nanos") 
.endObject() + + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + .endObject() .endObject() ) @@ -743,12 +981,70 @@ private void createIndexWithTimestamp(String indexName, int numShards, Settings ensureGreen(indexName); } - private void indexDocumentsWithTimestampWithinDate(String indexName, int docCount, String timestampTemplate) throws Exception { + private void createIndexWithOnlyOneTimestampField(String timestampField, String index, int numShards, Settings extraSettings) + throws IOException { + assertAcked( + indicesAdmin().prepareCreate(index) + .setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + + .startObject(timestampField) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + + .endObject() + .endObject() + ) + .setSettings(indexSettingsNoReplicas(numShards).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) + ); + ensureGreen(index); + } + + private void indexDocumentsWithOnlyOneTimestampField(String timestampField, String index, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + indexRequestBuilders.add( + prepareIndex(index).setSource( + timestampField, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ) + ) + ); + } + indexRandom(true, false, indexRequestBuilders); + + assertThat(indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), equalTo(0)); + refresh(index); + forceMerge(); + } + + private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ), + IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, timestampTemplate, @@ -789,4 +1085,39 @@ private void waitUntilRecoveryIsDone(String index) throws Exception { private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } + + record SearchShardAPIResult(List skipped, List notSkipped) {} + + private static SearchShardAPIResult doSearchShardAPIQuery( + List indicesToSearch, + RangeQueryBuilder rangeQuery, + boolean allowPartialSearchResults, + int expectedTotalShards + ) { + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + indicesToSearch.toArray(new String[0]), + SearchRequest.DEFAULT_INDICES_OPTIONS, + rangeQuery, + null, + null, + allowPartialSearchResults, + null + ); + + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); + assertThat(searchShardsResponse.getGroups().size(), equalTo(expectedTotalShards)); + List> partitionedBySkipped = searchShardsResponse.getGroups() + .stream() + .collect( + Collectors.teeing( + Collectors.filtering(g -> g.skipped(), Collectors.toList()), + Collectors.filtering(g -> 
g.skipped() == false, Collectors.toList()), + List::of + ) + ); + + List skipped = partitionedBySkipped.get(0); + List notSkipped = partitionedBySkipped.get(1); + return new SearchShardAPIResult(skipped, notSkipped); + } } From 1d1e8d1e3bcf3092a4b5ee6b295bc35024d37fb7 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 26 Aug 2024 16:39:39 +0200 Subject: [PATCH 206/389] Introduce a XYQueryUtils to be used by _search and ESQL to generate Cartesian queries (#112204) --- .../lucene/spatial/XYQueriesUtils.java | 56 ++++++--- .../CartesianPointDocValuesQueryTests.java | 116 ++++++++++++++++++ .../CartesianShapeDocValuesQueryTests.java | 36 ++---- .../scalar/spatial/SpatialContains.java | 5 +- .../scalar/spatial/SpatialDisjoint.java | 5 +- .../scalar/spatial/SpatialIntersects.java | 5 +- .../spatial/SpatialRelatesFunction.java | 3 +- .../scalar/spatial/SpatialWithin.java | 5 +- .../planner/EsqlExpressionTranslators.java | 4 +- .../querydsl/query/SpatialRelatesQuery.java | 88 ++----------- .../index/mapper/PointFieldMapper.java | 7 +- .../index/mapper/ShapeFieldMapper.java | 7 +- .../index/query/ShapeQueryProcessor.java | 39 ------ .../spatial/ingest/CircleProcessorTests.java | 7 +- 14 files changed, 202 insertions(+), 181 deletions(-) rename x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java => server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java (80%) create mode 100644 server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java delete mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java b/server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java similarity index 80% rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java rename to server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java index 22616eabf8211..23aaf2ab16722 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java +++ b/server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java @@ -1,13 +1,16 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. */ -package org.elasticsearch.xpack.spatial.index.query; + +package org.elasticsearch.lucene.spatial; import org.apache.lucene.document.XYDocValuesField; import org.apache.lucene.document.XYPointField; +import org.apache.lucene.document.XYShape; import org.apache.lucene.geo.Component2D; import org.apache.lucene.geo.XYGeometry; import org.apache.lucene.geo.XYPoint; @@ -25,23 +28,25 @@ import java.util.Arrays; -/** Generates a lucene query for a spatial query over a point field. - * - * Note that lucene only supports intersects spatial relation so we build other relations - * using just that one. 
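// The javadoc above carries over to XYQueriesUtils: point fields expose only an
// intersects primitive, so the other relations are composed from it. A minimal
// sketch of the DISJOINT composition (EXISTS && !INTERSECTS, as the SpatialRelatesQuery
// code removed later in this patch also notes), in plain Lucene; the field name
// "location", the sample point, and the use of FieldExistsQuery are illustrative
// assumptions, and FieldExistsQuery only sees the field if it was also indexed with
// doc values (e.g. XYDocValuesField), as the tests added by this patch do:
XYGeometry[] geometries = new XYGeometry[] { new XYPoint(10f, 10f) };
Query intersects = XYPointField.newGeometryQuery("location", geometries);
BooleanQuery.Builder disjoint = new BooleanQuery.Builder();
disjoint.add(new FieldExistsQuery("location"), BooleanClause.Occur.MUST); // field must have a value...
disjoint.add(intersects, BooleanClause.Occur.MUST_NOT); // ...and no value may intersect the shape
Query disjointQuery = disjoint.build();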
- * */ -public class ShapeQueryPointProcessor { +/** Utility methods that generate a lucene query for a spatial query over a cartesian field.* */ +public class XYQueriesUtils { - public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean isIndexed, boolean hasDocValues) { - assert isIndexed || hasDocValues; + /** Generates a lucene query for a field that has been previously indexed using {@link XYPoint}.It expects + * either {code indexed} or {@code has docValues} to be true or or both to be true. + * + * Note that lucene only supports intersects spatial relation so we build other relations + * using just that one. + * */ + public static Query toXYPointQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { + assert indexed || hasDocValues; final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); // XYPointField only supports intersects query so we build all the relationships using that logic. // it is not very efficient but it works. return switch (relation) { - case INTERSECTS -> buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); - case DISJOINT -> buildDisjointQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); - case CONTAINS -> buildContainsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); - case WITHIN -> buildWithinQuery(fieldName, isIndexed, hasDocValues, luceneGeometries); + case INTERSECTS -> buildIntersectsQuery(fieldName, indexed, hasDocValues, luceneGeometries); + case DISJOINT -> buildDisjointQuery(fieldName, indexed, hasDocValues, luceneGeometries); + case CONTAINS -> buildContainsQuery(fieldName, indexed, hasDocValues, luceneGeometries); + case WITHIN -> buildWithinQuery(fieldName, indexed, hasDocValues, luceneGeometries); }; } @@ -276,4 +281,25 @@ public int hashCode() { return Arrays.hashCode(geometries); } } + + /** Generates a lucene query for a field that has been previously indexed using {@link XYShape}.It expects + * either {code indexed} or {@code has docValues} to be true or both to be true. */ + public static Query toXYShapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { + assert indexed || hasDocValues; + if (geometry == null || geometry.isEmpty()) { + return new MatchNoDocsQuery(); + } + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + Query query; + if (indexed) { + query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + if (hasDocValues) { + final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + query = new IndexOrDocValuesQuery(query, queryDocValues); + } + } else { + query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + } + return query; + } } diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java new file mode 100644 index 0000000000000..ff9074dba52eb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.lucene.spatial; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.XYDocValuesField; +import org.apache.lucene.document.XYPointField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.SerialMergeScheduler; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.search.CheckHits; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class CartesianPointDocValuesQueryTests extends ESTestCase { + + private static final String FIELD_NAME = "field"; + + public void testIndexSimpleShapes() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // Else seeds may not reproduce: + iwc.setMergeScheduler(new SerialMergeScheduler()); + // Else we can get O(N^2) merging: + iwc.setMaxBufferedDocs(10); + Directory dir = newDirectory(); + // RandomIndexWriter is too slow here: + IndexWriter w = new IndexWriter(dir, iwc); + final int numDocs = randomIntBetween(10, 1000); + for (int id = 0; id < numDocs; id++) { + Document doc = new Document(); + Point point = ShapeTestUtils.randomPoint(); + doc.add(new XYPointField(FIELD_NAME, (float) point.getX(), (float) point.getY())); + doc.add(new XYDocValuesField(FIELD_NAME, (float) point.getX(), (float) point.getY())); + w.addDocument(doc); + } + + if (random().nextBoolean()) { + w.forceMerge(1); + } + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + for (int i = 0; i < 25; i++) { + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, false, true); + assertQueries(s, indexQuery, docValQuery, numDocs); + } + } + IOUtils.close(r, dir); + } + + public void testIndexMultiShapes() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // Else seeds may not reproduce: + iwc.setMergeScheduler(new SerialMergeScheduler()); + // Else we can get O(N^2) merging: + iwc.setMaxBufferedDocs(10); + Directory dir = newDirectory(); + // RandomIndexWriter is too slow here: + IndexWriter w = new IndexWriter(dir, iwc); + final int numDocs = randomIntBetween(10, 100); + CartesianShapeIndexer indexer = new CartesianShapeIndexer(FIELD_NAME); + for (int id = 0; id < numDocs; id++) { + Document doc = new Document(); + for (int i = 0; i < randomIntBetween(1, 5); i++) { + Point point = ShapeTestUtils.randomPoint(); + doc.add(new XYPointField(FIELD_NAME, (float) point.getX(), (float) point.getY())); + doc.add(new XYDocValuesField(FIELD_NAME, (float) point.getX(), (float) point.getY())); + w.addDocument(doc); + } + w.addDocument(doc); + } + + if 
(random().nextBoolean()) { + w.forceMerge(1); + } + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + for (int i = 0; i < 25; i++) { + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, false, true); + assertQueries(s, indexQuery, docValQuery, numDocs); + } + } + IOUtils.close(r, dir); + } + + private void assertQueries(IndexSearcher s, Query indexQuery, Query docValQuery, int numDocs) throws IOException { + assertEquals(s.count(indexQuery), s.count(docValQuery)); + CheckHits.checkEqual(docValQuery, s.search(indexQuery, numDocs).scoreDocs, s.search(docValQuery, numDocs).scoreDocs); + } +} diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java index 9ee84fcaa352f..e98b9016cca1c 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.ShapeField; import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; import org.apache.lucene.geo.XYPolygon; import org.apache.lucene.geo.XYRectangle; import org.apache.lucene.index.DirectoryReader; @@ -27,6 +26,7 @@ import org.apache.lucene.tests.search.CheckHits; import org.apache.lucene.tests.search.QueryUtils; import org.elasticsearch.common.geo.LuceneGeometriesUtils; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.core.IOUtils; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geo.XShapeTestUtil; @@ -128,10 +128,10 @@ public void testIndexSimpleShapes() throws Exception { IndexSearcher s = newSearcher(r); for (int i = 0; i < 25; i++) { - XYGeometry[] geometries = randomLuceneQueryGeometries(); - for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { - Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, geometries); - Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, geometries); + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, false, true); assertQueries(s, indexQuery, docValQuery, numDocs); } } @@ -170,10 +170,10 @@ public void testIndexMultiShapes() throws Exception { IndexSearcher s = newSearcher(r); for (int i = 0; i < 25; i++) { - XYGeometry[] geometries = randomLuceneQueryGeometries(); - for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { - Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, geometries); - Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, geometries); + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = 
XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, false, true); assertQueries(s, indexQuery, docValQuery, numDocs); } } @@ -184,22 +184,4 @@ private void assertQueries(IndexSearcher s, Query indexQuery, Query docValQuery, assertEquals(s.count(indexQuery), s.count(docValQuery)); CheckHits.checkEqual(docValQuery, s.search(indexQuery, numDocs).scoreDocs, s.search(docValQuery, numDocs).scoreDocs); } - - private XYGeometry[] randomLuceneQueryGeometries() { - int numGeom = randomIntBetween(1, 3); - XYGeometry[] geometries = new XYGeometry[numGeom]; - for (int i = 0; i < numGeom; i++) { - geometries[i] = randomLuceneQueryGeometry(); - } - return geometries; - } - - private XYGeometry randomLuceneQueryGeometry() { - return switch (randomInt(3)) { - case 0 -> LuceneGeometriesUtils.toXYPolygon(ShapeTestUtils.randomPolygon(false)); - case 1 -> LuceneGeometriesUtils.toXYCircle(ShapeTestUtils.randomCircle(false)); - case 2 -> LuceneGeometriesUtils.toXYPoint(ShapeTestUtils.randomPoint(false)); - default -> XShapeTestUtil.nextBox(); - }; - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index 6cb3c34ba8b1f..6788d13cf345e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -151,8 +152,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.CONTAINS; + public ShapeRelation queryRelation() { + return ShapeRelation.CONTAINS; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index d04dc9e1a6b07..eac50f84bd12d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -106,8 +107,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.DISJOINT; + public ShapeRelation queryRelation() { + return ShapeRelation.DISJOINT; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index 48e99989c5699..886551d1f3154 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -104,8 +105,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.INTERSECTS; + public ShapeRelation queryRelation() { + return ShapeRelation.INTERSECTS; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java index 927c7aed936da..36e98984d2303 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.ShapeField; import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.geometry.Geometry; @@ -50,7 +51,7 @@ protected SpatialRelatesFunction(StreamInput in, boolean leftDocValues, boolean super(in, leftDocValues, rightDocValues, false); } - public abstract ShapeField.QueryRelation queryRelation(); + public abstract ShapeRelation queryRelation(); @Override public DataType dataType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index c204468ae17d1..0b210f07a02f4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -106,8 +107,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.WITHIN; + public ShapeRelation queryRelation() { + return ShapeRelation.WITHIN; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 854018f577bd9..b508dc6556456 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql.planner; -import org.apache.lucene.document.ShapeField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.geometry.Geometry; @@ -370,7 +370,7 @@ protected Query asQuery(SpatialRelatesFunction bc, TranslatorHandler handler) { return doTranslate(bc, handler); } - public static void checkSpatialRelatesFunction(Expression constantExpression, ShapeField.QueryRelation queryRelation) { + public static void checkSpatialRelatesFunction(Expression constantExpression, ShapeRelation queryRelation) { Check.isTrue( constantExpression.foldable(), "Line {}:{}: Comparisons against fields are not (currently) supported; offender [{}] in [ST_{}]", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java index d1e4e12f73868..4f0bcbb43e260 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java @@ -7,29 +7,18 @@ package org.elasticsearch.xpack.esql.querydsl.query; -import org.apache.lucene.document.ShapeField; -import org.apache.lucene.document.XYDocValuesField; -import org.apache.lucene.document.XYPointField; -import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchNoDocsQuery; import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.querydsl.query.Query; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -42,11 +31,11 @@ public class SpatialRelatesQuery extends Query { private final String field; - private final ShapeField.QueryRelation queryRelation; + private final ShapeRelation queryRelation; private final Geometry shape; private final DataType dataType; - public SpatialRelatesQuery(Source source, String field, ShapeField.QueryRelation queryRelation, 
Geometry shape, DataType dataType) { + public SpatialRelatesQuery(Source source, String field, ShapeRelation queryRelation, Geometry shape, DataType dataType) { super(source); this.field = field; this.queryRelation = queryRelation; @@ -205,87 +194,36 @@ org.apache.lucene.search.Query buildShapeQuery(SearchExecutionContext context, M return new ConstantScoreQuery(innerQuery); } - /** - * This code is based on the ShapeQueryPointProcessor.shapeQuery() method, with additional support for two special cases: - *
<ul> - * <li> - * DISJOINT queries (using {@code EXISTS && !INTERSECTS}, similar to {@code LegacyGeoShapeQueryProcessor.geoShapeQuery()}) - * </li> - * <li> - * CONTAINS queries (if the shape is a point, INTERSECTS is used, otherwise a MatchNoDocsQuery is built, - * similar to {@code LatLonPoint.makeContainsGeometryQuery()}) - * </li> - * </ul>
    - */ private static org.apache.lucene.search.Query pointShapeQuery( Geometry geometry, String fieldName, - ShapeField.QueryRelation relation, + ShapeRelation relation, SearchExecutionContext context ) { - final boolean hasDocValues = context.getFieldType(fieldName).hasDocValues(); - if (geometry == null || geometry.isEmpty()) { - throw new QueryShardException(context, "Invalid/empty geometry"); - } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - if (isPointGeometry(luceneGeometries) == false && relation == ShapeField.QueryRelation.CONTAINS) { - return new MatchNoDocsQuery("A point field can never contain a non-point geometry"); - } - org.apache.lucene.search.Query intersects = XYPointField.newGeometryQuery(fieldName, luceneGeometries); - if (relation == ShapeField.QueryRelation.DISJOINT) { - // XYPointField does not support DISJOINT queries, so we build one as EXISTS && !INTERSECTS - BooleanQuery.Builder bool = new BooleanQuery.Builder(); - org.apache.lucene.search.Query exists = ExistsQueryBuilder.newFilter(context, fieldName, false); - bool.add(exists, BooleanClause.Occur.MUST); - bool.add(intersects, BooleanClause.Occur.MUST_NOT); - return bool.build(); - } - - // Point-Intersects works for all cases except CONTAINS(shape) and DISJOINT, which are handled separately above - if (hasDocValues) { - final org.apache.lucene.search.Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); - intersects = new IndexOrDocValuesQuery(intersects, queryDocValues); + final MappedFieldType fieldType = context.getFieldType(fieldName); + try { + return XYQueriesUtils.toXYPointQuery(geometry, fieldName, relation, fieldType.isIndexed(), fieldType.hasDocValues()); + } catch (IllegalArgumentException e) { + throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); } - return intersects; - } - - private static boolean isPointGeometry(XYGeometry[] geometries) { - return geometries.length == 1 && geometries[0] instanceof org.apache.lucene.geo.XYPoint; } - /** - * This code is based on the ShapeQueryProcessor.shapeQuery() method - */ private static org.apache.lucene.search.Query shapeShapeQuery( Geometry geometry, String fieldName, - ShapeField.QueryRelation relation, + ShapeRelation relation, SearchExecutionContext context ) { - final boolean hasDocValues = context.getFieldType(fieldName).hasDocValues(); // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); - if (relation == ShapeField.QueryRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { throw new QueryShardException(context, relation + " query relation not supported for Field [" + fieldName + "]."); } - if (geometry == null || geometry.isEmpty()) { - throw new QueryShardException(context, "Invalid/empty geometry"); - } - final XYGeometry[] luceneGeometries; + final MappedFieldType fieldType = context.getFieldType(fieldName); try { - luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + return XYQueriesUtils.toXYShapeQuery(geometry, fieldName, relation, fieldType.isIndexed(), fieldType.hasDocValues()); } catch (IllegalArgumentException e) { throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); } - org.apache.lucene.search.Query query 
= XYShape.newGeometryQuery(fieldName, relation, luceneGeometries); - if (hasDocValues) { - final org.apache.lucene.search.Query queryDocValues = new CartesianShapeDocValuesQuery( - fieldName, - relation, - luceneGeometries - ); - query = new IndexOrDocValuesQuery(query, queryDocValues); - } - return query; } } }
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 9412dc3c5eb53..2901e374003dd 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -25,11 +25,11 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.spatial.common.CartesianPoint; import org.elasticsearch.xpack.spatial.index.fielddata.plain.CartesianPointIndexFieldData; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryPointProcessor; import org.elasticsearch.xpack.spatial.script.field.CartesianPointDocValuesField; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; @@ -178,8 +178,6 @@ public FieldMapper.Builder getMergeBuilder() { public static class PointFieldType extends AbstractPointFieldType<CartesianPoint> implements ShapeQueryable { - private final ShapeQueryPointProcessor queryProcessor; - private PointFieldType( String name, boolean indexed, @@ -190,7 +188,6 @@ private PointFieldType( Map<String, Object> meta ) { super(name, indexed, stored, hasDocValues, parser, nullValue, meta); - this.queryProcessor = new ShapeQueryPointProcessor(); } // only used in test @@ -216,7 +213,7 @@ public String typeName() { @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { failIfNotIndexedNorDocValuesFallback(context); - return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + return XYQueriesUtils.toXYPointQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } @Override
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index ab57efee527dc..0ea8c3e22e288 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -31,6 +31,7 @@ import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.script.field.AbstractScriptFieldFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.Field; @@ -39,7 +40,6 @@ import org.elasticsearch.xpack.spatial.index.fielddata.CartesianShapeValues; import org.elasticsearch.xpack.spatial.index.fielddata.plain.AbstractAtomicCartesianShapeFieldData; import org.elasticsearch.xpack.spatial.index.fielddata.plain.CartesianShapeIndexFieldData; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianShapeValuesSourceType; import java.io.IOException; @@ -137,8 +137,6 @@ public ShapeFieldMapper build(MapperBuilderContext context) { public static final class ShapeFieldType extends AbstractShapeGeometryFieldType<CartesianShapeValues> implements ShapeQueryable { - private final ShapeQueryProcessor queryProcessor; - public ShapeFieldType( String name, boolean indexed, @@ -148,7 +146,6 @@ public ShapeFieldType( Map<String, Object> meta ) { super(name, indexed, false, hasDocValues, parser, orientation, meta); - this.queryProcessor = new ShapeQueryProcessor(); } @Override @@ -172,7 +169,7 @@ public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation ); } try { - return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + return XYQueriesUtils.toXYShapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } catch (IllegalArgumentException e) { throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); }
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java deleted file mode 100644 index 25a0e55c027f5..0000000000000 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.spatial.index.query; - -import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.geo.LuceneGeometriesUtils; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; - -public class ShapeQueryProcessor { - - public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { - assert indexed || hasDocValues; - if (geometry == null || geometry.isEmpty()) { - return new MatchNoDocsQuery(); - } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - Query query; - if (indexed) { - query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); - } - } else { - query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - } - return query; - } -} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java index e71b4f0f4e981..66f5597be543e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -32,13 +32,13 @@ import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper.GeoShapeWithDocValuesFieldType; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import java.io.IOException; import java.util.Collections; @@ -242,9 +242,8 @@ public void testShapeQuery() throws IOException { int numSides = randomIntBetween(4, 1000); Geometry geometry = CircleUtils.createRegularShapePolygon(circle, numSides); - ShapeQueryProcessor processor = new ShapeQueryProcessor(); - Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); - Query centerPointQuery = processor.shapeQuery( + Query sameShapeQuery = XYQueriesUtils.toXYShapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); + Query centerPointQuery = XYQueriesUtils.toXYShapeQuery( new Point(circle.getLon(), circle.getLat()), fieldName, ShapeRelation.INTERSECTS, From e07a3951859cc0d19ac320aec7568a9e0ff6400a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:44:06 +0200 Subject: [PATCH 207/389] Fix testPEMKeyConfigReloading intermittent failures (#112086) Relates: 
https://github.com/elastic/elasticsearch/issues/101427 The purpose of `testPEMKeyConfigReloading` is to make sure `SSLConfigurationReloader` works as expected when the SSL key and certificate files that the SSL config points to are changed. The `SSLConfigurationReloader` uses a `ResourceWatcherService` to notify a provided consumer when the files have changed. In this test we first set up a web server using a valid certificate and key that the client trusts, and then we validate that the SSL handshake is ok. We then modify the key and cert files to contain a certificate that's not trusted by the client and validate that the SSL handshake fails ("PKIX path validation failed"). Overwriting the key and certificate is a two-step, non-atomic process (two file overwrites). The test was failing intermittently because sometimes the one-second refresh interval elapsed between the two overwrites, so only the key had been overwritten, resulting in an unexpected (to the test) error: "Signature length not correct: got 512 but was expecting 256". This can be reproduced by commenting out the certificate overwrite. The fix is to split the two file overwrites into two separate reload events, using a `CyclicBarrier` to wait for each reload to complete before performing the next overwrite. I also lowered the interval to make sure the test isn't slower than before. --- .../ssl/SSLConfigurationReloaderTests.java | 41 +++++++++++++++---- 1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 7b19d53663a08..4d1ebf6cbaabc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -68,6 +68,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -201,14 +202,19 @@ public void testPEMKeyConfigReloading() throws Exception { throw new RuntimeException("Exception starting or connecting to the mock server", e); } }; - final Runnable modifier = () -> { + final List<Runnable> modifierFunctions = List.of(() -> { try { atomicMoveIfPossible(updatedKeyPath, keyPath); + } catch (Exception e) { + throw new RuntimeException("failed to modify file", e); + } + }, () -> { + try { atomicMoveIfPossible(updatedCertPath, certPath); } catch (Exception e) { throw new RuntimeException("failed to modify file", e); } - }; + }); // The new server certificate is not in the client's truststore so SSLHandshake should fail final Consumer<SSLContext> keyMaterialPostChecks = (updatedContext) -> { @@ -224,7 +230,7 @@ public void testPEMKeyConfigReloading() throws Exception { throw new RuntimeException("Exception starting or connecting to the mock server", e); } }; - validateSSLConfigurationIsReloaded(env, keyMaterialPreChecks, modifier, keyMaterialPostChecks); + validateSSLConfigurationIsReloaded(env, keyMaterialPreChecks, modifierFunctions, keyMaterialPostChecks); } } @@ -559,28 +565,45 @@ private Settings.Builder baseKeystoreSettings(Path tempDir, MockSecureSettings s private void validateSSLConfigurationIsReloaded( Environment env, Consumer<SSLContext> preChecks, - Runnable modificationFunction, + Runnable modifierFunction, + Consumer<SSLContext> postChecks + ) throws Exception { + validateSSLConfigurationIsReloaded(env, preChecks, List.of(modifierFunction), postChecks); + } + + private void validateSSLConfigurationIsReloaded( + Environment env, + Consumer<SSLContext> preChecks, + List<Runnable> modifierFunctions, Consumer<SSLContext> postChecks ) throws Exception { - final CountDownLatch reloadLatch = new CountDownLatch(1); + final CyclicBarrier reloadBarrier = new CyclicBarrier(2); final SSLService sslService = new SSLService(env); final SslConfiguration config = sslService.getSSLConfiguration("xpack.security.transport.ssl"); final Consumer<SslConfiguration> reloadConsumer = sslConfiguration -> { try { sslService.reloadSSLContext(sslConfiguration); } finally { - reloadLatch.countDown(); + try { + reloadBarrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; new SSLConfigurationReloader(reloadConsumer, resourceWatcherService, SSLService.getSSLConfigurations(env).values()); // Baseline checks preChecks.accept(sslService.sslContextHolder(config).sslContext()); - assertEquals("nothing should have called reload", 1, reloadLatch.getCount()); + assertEquals("nothing should have called reload", 0, reloadBarrier.getNumberWaiting()); // modify - modificationFunction.run(); - reloadLatch.await(); + for (var modifierFunction : modifierFunctions) { + modifierFunction.run(); + reloadBarrier.await(); + reloadBarrier.reset(); + } + // checks after reload postChecks.accept(sslService.sslContextHolder(config).sslContext()); }
From eaf27bdda38d3a94bdf099ac8e1f0ba0feab72da Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 27 Aug 2024 01:28:10 +1000 Subject: [PATCH 208/389] Mute org.elasticsearch.xpack.ml.integration.MlJobIT testDeleteJobAsync #112212 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/muted-tests.yml b/muted-tests.yml index 77ec7800f8a4d..85c29759cabb2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -169,6 +169,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {mv_percentile.FromIndex ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112193 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAsync + issue: https://github.com/elastic/elasticsearch/issues/112212 # Examples: #
From 5580b91d522b615a261d46eb743316dfcd5cf11f Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Mon, 26 Aug 2024 12:17:50 -0400 Subject: [PATCH 209/389] [ES|QL] Name parameter with leading underscore (#111950) * name parameter with leading underscore --- docs/changelog/111950.yaml | 6 + .../xpack/esql/core/util/StringUtils.java | 31 +- .../xpack/esql/qa/rest/RestEsqlTestCase.java | 9 +- .../esql/src/main/antlr/EsqlBaseLexer.g4 | 2 +- .../xpack/esql/action/RequestXContent.java | 11 +- .../xpack/esql/parser/EsqlBaseLexer.interp | 2 +- .../xpack/esql/parser/EsqlBaseLexer.java | 1262 +++++++++-------- .../esql/action/EsqlQueryRequestTests.java | 25 +- .../esql/parser/StatementParserTests.java | 155 +- 9 files changed, 841 insertions(+), 662 deletions(-) create mode 100644 docs/changelog/111950.yaml diff 
--git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java index 4ba3658697c0d..cd0ade2054ce6 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java @@ -398,20 +398,41 @@ public static boolean isQualified(String indexWildcard) { public static boolean isInteger(String value) { for (char c : value.trim().toCharArray()) { - if (Character.isDigit(c) == false) { + if (isDigit(c) == false) { return false; } } return true; } + private static boolean isLetter(char c) { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); + } + + private static boolean isDigit(char c) { + return c >= '0' && c <= '9'; + } + + private static boolean isUnderscore(char c) { + return c == '_'; + } + + private static boolean isLetterOrDigitOrUnderscore(char c) { + return isLetter(c) || isDigit(c) || isUnderscore(c); + } + + private static boolean isLetterOrUnderscore(char c) { + return isLetter(c) || isUnderscore(c); + } + public static boolean isValidParamName(String value) { - // A valid name starts with a letter and contain only letter, digit or _ - if (Character.isLetter(value.charAt(0)) == false) { + // A valid name starts with a letter or _ + if (isLetterOrUnderscore(value.charAt(0)) == false) { return false; } - for (char c : value.trim().toCharArray()) { - if (Character.isLetterOrDigit(c) == false && c != '_') { + // contain only letter, digit or _ + for (char c : value.toCharArray()) { + if (isLetterOrDigitOrUnderscore(c) == false) { return false; } } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 8b6511875e86c..6f5297bbeef4d 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -576,16 +576,18 @@ public void testErrorMessageForInvalidParams() throws IOException { () -> runEsqlSync( requestObjectBuilder().query("row a = 1 | eval x = ?, y = ?") .params( - "[{\"1\": \"v1\"}, {\"1-\": \"v1\"}, {\"_a\": \"v1\"}, {\"@-#\": \"v1\"}, true, 123, " - + "{\"type\": \"byte\", \"value\": 5}]" + "[{\"1\": \"v1\"}, {\"1-\": \"v1\"}, {\"-a\": \"v1\"}, {\"@-#\": \"v1\"}, true, 123, " + + "{\"type\": \"byte\", \"value\": 5}, {\"_1\": \"v1\"}, {\"_a\": \"v1\"}]" ) ) ); String error = EntityUtils.toString(re.getResponse().getEntity()).replaceAll("\\\\\n\s+\\\\", ""); assertThat(error, containsString("[1] is not a valid parameter name")); assertThat(error, containsString("[1-] is not a valid parameter name")); - assertThat(error, containsString("[_a] is not a valid parameter name")); + assertThat(error, containsString("[-a] is not a valid parameter name")); assertThat(error, containsString("[@-#] is not a valid parameter name")); + assertThat(error, not(containsString("[_a] is not a valid parameter name"))); + assertThat(error, not(containsString("[_1] is not a valid parameter name"))); assertThat(error, containsString("Params cannot contain both named and unnamed parameters")); assertThat(error, containsString("Cannot parse more than one key:value pair as parameter")); re = expectThrows( @@ 
-600,7 +602,6 @@ public void testErrorMessageForInvalidParams() throws IOException { EntityUtils.toString(re.getResponse().getEntity()), containsString("No parameter is defined for position 2, did you mean position 1") ); - re = expectThrows( ResponseException.class, () -> runEsqlSync(requestObjectBuilder().query("row a = ?n0").params("[{\"n1\": \"v1\"}]"))
diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 index 769c399fe2dcf..84494458cbc26 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 @@ -161,7 +161,7 @@ SLASH : '/'; PERCENT : '%'; NAMED_OR_POSITIONAL_PARAM - : PARAM LETTER UNQUOTED_ID_BODY* + : PARAM (LETTER | UNDERSCORE) UNQUOTED_ID_BODY* | PARAM DIGIT+ ;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 810e313002189..76573a8f4cc1d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -155,14 +155,16 @@ private static QueryParams parseParams(XContentParser p) throws IOException { ); } for (Map.Entry<String, Object> entry : param.fields.entrySet()) { - if (isValidParamName(entry.getKey()) == false) { + String name = entry.getKey(); + if (isValidParamName(name) == false) { errors.add( new XContentParseException( loc, "[" - + entry.getKey() + + name + "] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, " + + "and contains letters, digits and underscores only" ) ); } @@ -170,7 +172,7 @@ private static QueryParams parseParams(XContentParser p) throws IOException { if (type == null) { errors.add(new XContentParseException(loc, entry + " is not supported as a parameter")); } - currentParam = new QueryParam(entry.getKey(), entry.getValue(), type); + currentParam = new QueryParam(name, entry.getValue(), type); namedParams.add(currentParam); } } else { @@ -203,6 +205,7 @@ private static QueryParams parseParams(XContentParser p) throws IOException { } } } + // don't allow mixed named and unnamed parameters if (namedParams.isEmpty() == false && unNamedParams.isEmpty() == false) { errors.add( new XContentParseException(
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp index faf00552381fb..a8d01c959cd7f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp @@ -477,4 +477,4 @@ METRICS_MODE CLOSING_METRICS_MODE atn: -[4, 0, 126, 1468, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 
31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 
1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 4, 21, 587, 8, 21, 11, 21, 12, 21, 588, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 597, 8, 22, 10, 22, 12, 22, 600, 9, 22, 1, 22, 3, 22, 603, 8, 22, 1, 22, 3, 22, 606, 8, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 615, 8, 23, 10, 23, 12, 23, 618, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 4, 24, 626, 8, 24, 11, 24, 12, 24, 627, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 3, 25, 635, 8, 25, 1, 26, 4, 26, 638, 8, 26, 11, 26, 12, 26, 639, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 1, 37, 3, 37, 679, 8, 37, 1, 37, 4, 37, 682, 8, 37, 11, 37, 12, 37, 683, 1, 38, 1, 38, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 3, 40, 693, 8, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 3, 42, 700, 8, 42, 1, 43, 1, 43, 1, 43, 5, 43, 705, 8, 43, 10, 43, 12, 43, 708, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 716, 8, 43, 10, 43, 12, 43, 719, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 3, 43, 726, 8, 43, 1, 43, 3, 43, 729, 8, 43, 3, 43, 731, 8, 43, 1, 44, 4, 44, 734, 8, 44, 11, 44, 12, 44, 735, 1, 45, 4, 45, 739, 8, 45, 11, 45, 12, 45, 740, 1, 45, 1, 45, 5, 45, 745, 8, 45, 10, 45, 12, 45, 748, 9, 45, 1, 45, 1, 45, 4, 45, 752, 8, 45, 11, 45, 12, 45, 753, 1, 45, 4, 45, 757, 8, 45, 11, 45, 12, 45, 758, 1, 45, 1, 45, 5, 45, 763, 8, 45, 10, 45, 12, 45, 766, 9, 45, 3, 45, 768, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 4, 45, 774, 8, 45, 11, 45, 12, 45, 775, 1, 45, 1, 45, 3, 45, 780, 8, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 5, 82, 908, 8, 82, 10, 82, 12, 82, 911, 9, 82, 1, 82, 1, 82, 4, 82, 915, 8, 82, 11, 82, 12, 82, 916, 3, 82, 919, 8, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 5, 85, 933, 8, 85, 10, 85, 12, 85, 936, 9, 85, 1, 85, 1, 85, 3, 85, 940, 8, 85, 1, 85, 4, 85, 943, 8, 85, 11, 85, 12, 85, 944, 3, 85, 947, 8, 85, 1, 86, 1, 86, 4, 86, 951, 8, 86, 11, 86, 12, 86, 952, 1, 86, 1, 86, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 
104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1042, 8, 106, 1, 107, 1, 107, 3, 107, 1046, 8, 107, 1, 107, 5, 107, 1049, 8, 107, 10, 107, 12, 107, 1052, 9, 107, 1, 107, 1, 107, 3, 107, 1056, 8, 107, 1, 107, 4, 107, 1059, 8, 107, 11, 107, 12, 107, 1060, 3, 107, 1063, 8, 107, 1, 108, 1, 108, 4, 108, 1067, 8, 108, 11, 108, 12, 108, 1068, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 126, 4, 126, 1144, 8, 126, 11, 126, 12, 126, 1145, 1, 126, 1, 126, 3, 126, 1150, 8, 126, 1, 126, 4, 126, 1153, 8, 126, 11, 126, 12, 126, 1154, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 4, 177, 1377, 8, 177, 11, 177, 12, 177, 1378, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 
194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 2, 616, 717, 0, 196, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 25, 66, 0, 68, 26, 70, 0, 72, 0, 74, 27, 76, 28, 78, 29, 80, 30, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 0, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 72, 186, 73, 188, 0, 190, 74, 192, 75, 194, 76, 196, 77, 198, 0, 200, 0, 202, 0, 204, 0, 206, 0, 208, 0, 210, 78, 212, 0, 214, 0, 216, 79, 218, 80, 220, 81, 222, 0, 224, 0, 226, 0, 228, 0, 230, 0, 232, 82, 234, 83, 236, 84, 238, 85, 240, 0, 242, 0, 244, 0, 246, 0, 248, 86, 250, 0, 252, 87, 254, 88, 256, 89, 258, 0, 260, 0, 262, 90, 264, 91, 266, 0, 268, 92, 270, 0, 272, 93, 274, 94, 276, 95, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 0, 290, 0, 292, 96, 294, 97, 296, 98, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 0, 310, 0, 312, 99, 314, 100, 316, 101, 318, 0, 320, 0, 322, 0, 324, 0, 326, 102, 328, 103, 330, 104, 332, 0, 334, 0, 336, 0, 338, 0, 340, 105, 342, 106, 344, 107, 346, 0, 348, 108, 350, 109, 352, 110, 354, 111, 356, 0, 358, 112, 360, 113, 362, 114, 364, 115, 366, 0, 368, 116, 370, 117, 372, 118, 374, 119, 376, 120, 378, 0, 380, 0, 382, 0, 384, 121, 386, 122, 388, 123, 390, 0, 392, 0, 394, 124, 396, 125, 398, 126, 400, 0, 402, 0, 404, 0, 406, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1494, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 1, 78, 1, 0, 0, 0, 2, 80, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 
1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 184, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 2, 194, 1, 0, 0, 0, 2, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 216, 1, 0, 0, 0, 3, 218, 1, 0, 0, 0, 3, 220, 1, 0, 0, 0, 4, 222, 1, 0, 0, 0, 4, 224, 1, 0, 0, 0, 4, 226, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 4, 236, 1, 0, 0, 0, 4, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 5, 254, 1, 0, 0, 0, 5, 256, 1, 0, 0, 0, 6, 258, 1, 0, 0, 0, 6, 260, 1, 0, 0, 0, 6, 262, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 6, 274, 1, 0, 0, 0, 6, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 7, 294, 1, 0, 0, 0, 7, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 8, 310, 1, 0, 0, 0, 8, 312, 1, 0, 0, 0, 8, 314, 1, 0, 0, 0, 8, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 9, 324, 1, 0, 0, 0, 9, 326, 1, 0, 0, 0, 9, 328, 1, 0, 0, 0, 9, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 10, 338, 1, 0, 0, 0, 10, 340, 1, 0, 0, 0, 10, 342, 1, 0, 0, 0, 10, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 11, 348, 1, 0, 0, 0, 11, 350, 1, 0, 0, 0, 11, 352, 1, 0, 0, 0, 11, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 12, 358, 1, 0, 0, 0, 12, 360, 1, 0, 0, 0, 12, 362, 1, 0, 0, 0, 12, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 13, 370, 1, 0, 0, 0, 13, 372, 1, 0, 0, 0, 13, 374, 1, 0, 0, 0, 13, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 14, 380, 1, 0, 0, 0, 14, 382, 1, 0, 0, 0, 14, 384, 1, 0, 0, 0, 14, 386, 1, 0, 0, 0, 14, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 15, 396, 1, 0, 0, 0, 15, 398, 1, 0, 0, 0, 15, 400, 1, 0, 0, 0, 15, 402, 1, 0, 0, 0, 15, 404, 1, 0, 0, 0, 15, 406, 1, 0, 0, 0, 16, 408, 1, 0, 0, 0, 18, 418, 1, 0, 0, 0, 20, 425, 1, 0, 0, 0, 22, 434, 1, 0, 0, 0, 24, 441, 1, 0, 0, 0, 26, 451, 1, 0, 0, 0, 28, 458, 1, 0, 0, 0, 30, 465, 1, 0, 0, 0, 32, 479, 1, 0, 0, 0, 34, 486, 1, 0, 0, 0, 36, 494, 1, 0, 0, 0, 38, 503, 1, 0, 0, 0, 40, 510, 1, 0, 0, 0, 42, 520, 1, 0, 0, 0, 44, 532, 1, 0, 0, 0, 46, 541, 1, 0, 0, 0, 48, 547, 1, 0, 0, 0, 50, 554, 1, 0, 0, 0, 52, 561, 1, 0, 0, 0, 54, 569, 1, 0, 0, 0, 56, 577, 1, 0, 0, 0, 58, 586, 1, 0, 0, 0, 60, 592, 1, 0, 0, 0, 62, 609, 1, 0, 0, 0, 64, 625, 1, 0, 0, 0, 66, 634, 1, 0, 0, 0, 68, 637, 1, 0, 0, 0, 70, 641, 1, 0, 0, 0, 72, 646, 1, 0, 0, 0, 74, 651, 1, 0, 0, 0, 76, 655, 1, 0, 0, 0, 78, 659, 1, 0, 0, 0, 80, 663, 1, 0, 0, 0, 82, 667, 1, 0, 0, 0, 84, 669, 1, 0, 0, 0, 86, 671, 1, 0, 0, 0, 88, 674, 1, 0, 0, 0, 90, 676, 1, 0, 0, 0, 92, 685, 1, 0, 0, 0, 94, 687, 1, 0, 0, 0, 96, 692, 1, 0, 0, 0, 98, 694, 1, 0, 0, 0, 100, 699, 1, 0, 0, 0, 102, 730, 1, 0, 0, 0, 104, 733, 1, 0, 0, 0, 106, 779, 1, 0, 0, 0, 108, 781, 1, 0, 0, 0, 110, 784, 1, 0, 0, 0, 112, 788, 1, 0, 0, 0, 114, 792, 1, 0, 0, 0, 116, 794, 1, 0, 0, 0, 118, 797, 1, 0, 0, 0, 120, 799, 1, 0, 0, 0, 122, 804, 1, 0, 0, 0, 124, 806, 1, 0, 0, 
0, 126, 812, 1, 0, 0, 0, 128, 818, 1, 0, 0, 0, 130, 821, 1, 0, 0, 0, 132, 824, 1, 0, 0, 0, 134, 829, 1, 0, 0, 0, 136, 834, 1, 0, 0, 0, 138, 836, 1, 0, 0, 0, 140, 842, 1, 0, 0, 0, 142, 846, 1, 0, 0, 0, 144, 851, 1, 0, 0, 0, 146, 857, 1, 0, 0, 0, 148, 860, 1, 0, 0, 0, 150, 862, 1, 0, 0, 0, 152, 868, 1, 0, 0, 0, 154, 870, 1, 0, 0, 0, 156, 875, 1, 0, 0, 0, 158, 878, 1, 0, 0, 0, 160, 881, 1, 0, 0, 0, 162, 884, 1, 0, 0, 0, 164, 886, 1, 0, 0, 0, 166, 889, 1, 0, 0, 0, 168, 891, 1, 0, 0, 0, 170, 894, 1, 0, 0, 0, 172, 896, 1, 0, 0, 0, 174, 898, 1, 0, 0, 0, 176, 900, 1, 0, 0, 0, 178, 902, 1, 0, 0, 0, 180, 918, 1, 0, 0, 0, 182, 920, 1, 0, 0, 0, 184, 925, 1, 0, 0, 0, 186, 946, 1, 0, 0, 0, 188, 948, 1, 0, 0, 0, 190, 956, 1, 0, 0, 0, 192, 958, 1, 0, 0, 0, 194, 962, 1, 0, 0, 0, 196, 966, 1, 0, 0, 0, 198, 970, 1, 0, 0, 0, 200, 975, 1, 0, 0, 0, 202, 979, 1, 0, 0, 0, 204, 983, 1, 0, 0, 0, 206, 987, 1, 0, 0, 0, 208, 991, 1, 0, 0, 0, 210, 995, 1, 0, 0, 0, 212, 1004, 1, 0, 0, 0, 214, 1008, 1, 0, 0, 0, 216, 1012, 1, 0, 0, 0, 218, 1016, 1, 0, 0, 0, 220, 1020, 1, 0, 0, 0, 222, 1024, 1, 0, 0, 0, 224, 1029, 1, 0, 0, 0, 226, 1033, 1, 0, 0, 0, 228, 1041, 1, 0, 0, 0, 230, 1062, 1, 0, 0, 0, 232, 1066, 1, 0, 0, 0, 234, 1070, 1, 0, 0, 0, 236, 1074, 1, 0, 0, 0, 238, 1078, 1, 0, 0, 0, 240, 1082, 1, 0, 0, 0, 242, 1087, 1, 0, 0, 0, 244, 1091, 1, 0, 0, 0, 246, 1095, 1, 0, 0, 0, 248, 1099, 1, 0, 0, 0, 250, 1102, 1, 0, 0, 0, 252, 1106, 1, 0, 0, 0, 254, 1110, 1, 0, 0, 0, 256, 1114, 1, 0, 0, 0, 258, 1118, 1, 0, 0, 0, 260, 1123, 1, 0, 0, 0, 262, 1128, 1, 0, 0, 0, 264, 1133, 1, 0, 0, 0, 266, 1140, 1, 0, 0, 0, 268, 1149, 1, 0, 0, 0, 270, 1156, 1, 0, 0, 0, 272, 1160, 1, 0, 0, 0, 274, 1164, 1, 0, 0, 0, 276, 1168, 1, 0, 0, 0, 278, 1172, 1, 0, 0, 0, 280, 1178, 1, 0, 0, 0, 282, 1182, 1, 0, 0, 0, 284, 1186, 1, 0, 0, 0, 286, 1190, 1, 0, 0, 0, 288, 1194, 1, 0, 0, 0, 290, 1198, 1, 0, 0, 0, 292, 1202, 1, 0, 0, 0, 294, 1206, 1, 0, 0, 0, 296, 1210, 1, 0, 0, 0, 298, 1214, 1, 0, 0, 0, 300, 1219, 1, 0, 0, 0, 302, 1223, 1, 0, 0, 0, 304, 1227, 1, 0, 0, 0, 306, 1231, 1, 0, 0, 0, 308, 1236, 1, 0, 0, 0, 310, 1240, 1, 0, 0, 0, 312, 1244, 1, 0, 0, 0, 314, 1248, 1, 0, 0, 0, 316, 1252, 1, 0, 0, 0, 318, 1256, 1, 0, 0, 0, 320, 1262, 1, 0, 0, 0, 322, 1266, 1, 0, 0, 0, 324, 1270, 1, 0, 0, 0, 326, 1274, 1, 0, 0, 0, 328, 1278, 1, 0, 0, 0, 330, 1282, 1, 0, 0, 0, 332, 1286, 1, 0, 0, 0, 334, 1291, 1, 0, 0, 0, 336, 1295, 1, 0, 0, 0, 338, 1299, 1, 0, 0, 0, 340, 1303, 1, 0, 0, 0, 342, 1307, 1, 0, 0, 0, 344, 1311, 1, 0, 0, 0, 346, 1315, 1, 0, 0, 0, 348, 1320, 1, 0, 0, 0, 350, 1325, 1, 0, 0, 0, 352, 1329, 1, 0, 0, 0, 354, 1333, 1, 0, 0, 0, 356, 1337, 1, 0, 0, 0, 358, 1342, 1, 0, 0, 0, 360, 1352, 1, 0, 0, 0, 362, 1356, 1, 0, 0, 0, 364, 1360, 1, 0, 0, 0, 366, 1364, 1, 0, 0, 0, 368, 1369, 1, 0, 0, 0, 370, 1376, 1, 0, 0, 0, 372, 1380, 1, 0, 0, 0, 374, 1384, 1, 0, 0, 0, 376, 1388, 1, 0, 0, 0, 378, 1392, 1, 0, 0, 0, 380, 1397, 1, 0, 0, 0, 382, 1403, 1, 0, 0, 0, 384, 1409, 1, 0, 0, 0, 386, 1413, 1, 0, 0, 0, 388, 1417, 1, 0, 0, 0, 390, 1421, 1, 0, 0, 0, 392, 1427, 1, 0, 0, 0, 394, 1433, 1, 0, 0, 0, 396, 1437, 1, 0, 0, 0, 398, 1441, 1, 0, 0, 0, 400, 1445, 1, 0, 0, 0, 402, 1451, 1, 0, 0, 0, 404, 1457, 1, 0, 0, 0, 406, 1463, 1, 0, 0, 0, 408, 409, 5, 100, 0, 0, 409, 410, 5, 105, 0, 0, 410, 411, 5, 115, 0, 0, 411, 412, 5, 115, 0, 0, 412, 413, 5, 101, 0, 0, 413, 414, 5, 99, 0, 0, 414, 415, 5, 116, 0, 0, 415, 416, 1, 0, 0, 0, 416, 417, 6, 0, 0, 0, 417, 17, 1, 0, 0, 0, 418, 419, 5, 100, 0, 0, 419, 420, 5, 114, 0, 0, 420, 421, 5, 111, 0, 0, 421, 422, 5, 112, 0, 0, 422, 423, 1, 0, 0, 0, 
423, 424, 6, 1, 1, 0, 424, 19, 1, 0, 0, 0, 425, 426, 5, 101, 0, 0, 426, 427, 5, 110, 0, 0, 427, 428, 5, 114, 0, 0, 428, 429, 5, 105, 0, 0, 429, 430, 5, 99, 0, 0, 430, 431, 5, 104, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 6, 2, 2, 0, 433, 21, 1, 0, 0, 0, 434, 435, 5, 101, 0, 0, 435, 436, 5, 118, 0, 0, 436, 437, 5, 97, 0, 0, 437, 438, 5, 108, 0, 0, 438, 439, 1, 0, 0, 0, 439, 440, 6, 3, 0, 0, 440, 23, 1, 0, 0, 0, 441, 442, 5, 101, 0, 0, 442, 443, 5, 120, 0, 0, 443, 444, 5, 112, 0, 0, 444, 445, 5, 108, 0, 0, 445, 446, 5, 97, 0, 0, 446, 447, 5, 105, 0, 0, 447, 448, 5, 110, 0, 0, 448, 449, 1, 0, 0, 0, 449, 450, 6, 4, 3, 0, 450, 25, 1, 0, 0, 0, 451, 452, 5, 102, 0, 0, 452, 453, 5, 114, 0, 0, 453, 454, 5, 111, 0, 0, 454, 455, 5, 109, 0, 0, 455, 456, 1, 0, 0, 0, 456, 457, 6, 5, 4, 0, 457, 27, 1, 0, 0, 0, 458, 459, 5, 103, 0, 0, 459, 460, 5, 114, 0, 0, 460, 461, 5, 111, 0, 0, 461, 462, 5, 107, 0, 0, 462, 463, 1, 0, 0, 0, 463, 464, 6, 6, 0, 0, 464, 29, 1, 0, 0, 0, 465, 466, 5, 105, 0, 0, 466, 467, 5, 110, 0, 0, 467, 468, 5, 108, 0, 0, 468, 469, 5, 105, 0, 0, 469, 470, 5, 110, 0, 0, 470, 471, 5, 101, 0, 0, 471, 472, 5, 115, 0, 0, 472, 473, 5, 116, 0, 0, 473, 474, 5, 97, 0, 0, 474, 475, 5, 116, 0, 0, 475, 476, 5, 115, 0, 0, 476, 477, 1, 0, 0, 0, 477, 478, 6, 7, 0, 0, 478, 31, 1, 0, 0, 0, 479, 480, 5, 107, 0, 0, 480, 481, 5, 101, 0, 0, 481, 482, 5, 101, 0, 0, 482, 483, 5, 112, 0, 0, 483, 484, 1, 0, 0, 0, 484, 485, 6, 8, 1, 0, 485, 33, 1, 0, 0, 0, 486, 487, 5, 108, 0, 0, 487, 488, 5, 105, 0, 0, 488, 489, 5, 109, 0, 0, 489, 490, 5, 105, 0, 0, 490, 491, 5, 116, 0, 0, 491, 492, 1, 0, 0, 0, 492, 493, 6, 9, 0, 0, 493, 35, 1, 0, 0, 0, 494, 495, 5, 108, 0, 0, 495, 496, 5, 111, 0, 0, 496, 497, 5, 111, 0, 0, 497, 498, 5, 107, 0, 0, 498, 499, 5, 117, 0, 0, 499, 500, 5, 112, 0, 0, 500, 501, 1, 0, 0, 0, 501, 502, 6, 10, 5, 0, 502, 37, 1, 0, 0, 0, 503, 504, 5, 109, 0, 0, 504, 505, 5, 101, 0, 0, 505, 506, 5, 116, 0, 0, 506, 507, 5, 97, 0, 0, 507, 508, 1, 0, 0, 0, 508, 509, 6, 11, 6, 0, 509, 39, 1, 0, 0, 0, 510, 511, 5, 109, 0, 0, 511, 512, 5, 101, 0, 0, 512, 513, 5, 116, 0, 0, 513, 514, 5, 114, 0, 0, 514, 515, 5, 105, 0, 0, 515, 516, 5, 99, 0, 0, 516, 517, 5, 115, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 6, 12, 7, 0, 519, 41, 1, 0, 0, 0, 520, 521, 5, 109, 0, 0, 521, 522, 5, 118, 0, 0, 522, 523, 5, 95, 0, 0, 523, 524, 5, 101, 0, 0, 524, 525, 5, 120, 0, 0, 525, 526, 5, 112, 0, 0, 526, 527, 5, 97, 0, 0, 527, 528, 5, 110, 0, 0, 528, 529, 5, 100, 0, 0, 529, 530, 1, 0, 0, 0, 530, 531, 6, 13, 8, 0, 531, 43, 1, 0, 0, 0, 532, 533, 5, 114, 0, 0, 533, 534, 5, 101, 0, 0, 534, 535, 5, 110, 0, 0, 535, 536, 5, 97, 0, 0, 536, 537, 5, 109, 0, 0, 537, 538, 5, 101, 0, 0, 538, 539, 1, 0, 0, 0, 539, 540, 6, 14, 9, 0, 540, 45, 1, 0, 0, 0, 541, 542, 5, 114, 0, 0, 542, 543, 5, 111, 0, 0, 543, 544, 5, 119, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 6, 15, 0, 0, 546, 47, 1, 0, 0, 0, 547, 548, 5, 115, 0, 0, 548, 549, 5, 104, 0, 0, 549, 550, 5, 111, 0, 0, 550, 551, 5, 119, 0, 0, 551, 552, 1, 0, 0, 0, 552, 553, 6, 16, 10, 0, 553, 49, 1, 0, 0, 0, 554, 555, 5, 115, 0, 0, 555, 556, 5, 111, 0, 0, 556, 557, 5, 114, 0, 0, 557, 558, 5, 116, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 6, 17, 0, 0, 560, 51, 1, 0, 0, 0, 561, 562, 5, 115, 0, 0, 562, 563, 5, 116, 0, 0, 563, 564, 5, 97, 0, 0, 564, 565, 5, 116, 0, 0, 565, 566, 5, 115, 0, 0, 566, 567, 1, 0, 0, 0, 567, 568, 6, 18, 0, 0, 568, 53, 1, 0, 0, 0, 569, 570, 5, 119, 0, 0, 570, 571, 5, 104, 0, 0, 571, 572, 5, 101, 0, 0, 572, 573, 5, 114, 0, 0, 573, 574, 5, 101, 0, 0, 574, 575, 1, 0, 0, 0, 575, 576, 6, 19, 0, 0, 576, 55, 
1, 0, 0, 0, 577, 578, 5, 109, 0, 0, 578, 579, 5, 97, 0, 0, 579, 580, 5, 116, 0, 0, 580, 581, 5, 99, 0, 0, 581, 582, 5, 104, 0, 0, 582, 583, 1, 0, 0, 0, 583, 584, 6, 20, 0, 0, 584, 57, 1, 0, 0, 0, 585, 587, 8, 0, 0, 0, 586, 585, 1, 0, 0, 0, 587, 588, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 590, 1, 0, 0, 0, 590, 591, 6, 21, 0, 0, 591, 59, 1, 0, 0, 0, 592, 593, 5, 47, 0, 0, 593, 594, 5, 47, 0, 0, 594, 598, 1, 0, 0, 0, 595, 597, 8, 1, 0, 0, 596, 595, 1, 0, 0, 0, 597, 600, 1, 0, 0, 0, 598, 596, 1, 0, 0, 0, 598, 599, 1, 0, 0, 0, 599, 602, 1, 0, 0, 0, 600, 598, 1, 0, 0, 0, 601, 603, 5, 13, 0, 0, 602, 601, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 605, 1, 0, 0, 0, 604, 606, 5, 10, 0, 0, 605, 604, 1, 0, 0, 0, 605, 606, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 6, 22, 11, 0, 608, 61, 1, 0, 0, 0, 609, 610, 5, 47, 0, 0, 610, 611, 5, 42, 0, 0, 611, 616, 1, 0, 0, 0, 612, 615, 3, 62, 23, 0, 613, 615, 9, 0, 0, 0, 614, 612, 1, 0, 0, 0, 614, 613, 1, 0, 0, 0, 615, 618, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 616, 614, 1, 0, 0, 0, 617, 619, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 619, 620, 5, 42, 0, 0, 620, 621, 5, 47, 0, 0, 621, 622, 1, 0, 0, 0, 622, 623, 6, 23, 11, 0, 623, 63, 1, 0, 0, 0, 624, 626, 7, 2, 0, 0, 625, 624, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 1, 0, 0, 0, 629, 630, 6, 24, 11, 0, 630, 65, 1, 0, 0, 0, 631, 635, 8, 3, 0, 0, 632, 633, 5, 47, 0, 0, 633, 635, 8, 4, 0, 0, 634, 631, 1, 0, 0, 0, 634, 632, 1, 0, 0, 0, 635, 67, 1, 0, 0, 0, 636, 638, 3, 66, 25, 0, 637, 636, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 637, 1, 0, 0, 0, 639, 640, 1, 0, 0, 0, 640, 69, 1, 0, 0, 0, 641, 642, 3, 182, 83, 0, 642, 643, 1, 0, 0, 0, 643, 644, 6, 27, 12, 0, 644, 645, 6, 27, 13, 0, 645, 71, 1, 0, 0, 0, 646, 647, 3, 80, 32, 0, 647, 648, 1, 0, 0, 0, 648, 649, 6, 28, 14, 0, 649, 650, 6, 28, 15, 0, 650, 73, 1, 0, 0, 0, 651, 652, 3, 64, 24, 0, 652, 653, 1, 0, 0, 0, 653, 654, 6, 29, 11, 0, 654, 75, 1, 0, 0, 0, 655, 656, 3, 60, 22, 0, 656, 657, 1, 0, 0, 0, 657, 658, 6, 30, 11, 0, 658, 77, 1, 0, 0, 0, 659, 660, 3, 62, 23, 0, 660, 661, 1, 0, 0, 0, 661, 662, 6, 31, 11, 0, 662, 79, 1, 0, 0, 0, 663, 664, 5, 124, 0, 0, 664, 665, 1, 0, 0, 0, 665, 666, 6, 32, 15, 0, 666, 81, 1, 0, 0, 0, 667, 668, 7, 5, 0, 0, 668, 83, 1, 0, 0, 0, 669, 670, 7, 6, 0, 0, 670, 85, 1, 0, 0, 0, 671, 672, 5, 92, 0, 0, 672, 673, 7, 7, 0, 0, 673, 87, 1, 0, 0, 0, 674, 675, 8, 8, 0, 0, 675, 89, 1, 0, 0, 0, 676, 678, 7, 9, 0, 0, 677, 679, 7, 10, 0, 0, 678, 677, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 681, 1, 0, 0, 0, 680, 682, 3, 82, 33, 0, 681, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 681, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 684, 91, 1, 0, 0, 0, 685, 686, 5, 64, 0, 0, 686, 93, 1, 0, 0, 0, 687, 688, 5, 96, 0, 0, 688, 95, 1, 0, 0, 0, 689, 693, 8, 11, 0, 0, 690, 691, 5, 96, 0, 0, 691, 693, 5, 96, 0, 0, 692, 689, 1, 0, 0, 0, 692, 690, 1, 0, 0, 0, 693, 97, 1, 0, 0, 0, 694, 695, 5, 95, 0, 0, 695, 99, 1, 0, 0, 0, 696, 700, 3, 84, 34, 0, 697, 700, 3, 82, 33, 0, 698, 700, 3, 98, 41, 0, 699, 696, 1, 0, 0, 0, 699, 697, 1, 0, 0, 0, 699, 698, 1, 0, 0, 0, 700, 101, 1, 0, 0, 0, 701, 706, 5, 34, 0, 0, 702, 705, 3, 86, 35, 0, 703, 705, 3, 88, 36, 0, 704, 702, 1, 0, 0, 0, 704, 703, 1, 0, 0, 0, 705, 708, 1, 0, 0, 0, 706, 704, 1, 0, 0, 0, 706, 707, 1, 0, 0, 0, 707, 709, 1, 0, 0, 0, 708, 706, 1, 0, 0, 0, 709, 731, 5, 34, 0, 0, 710, 711, 5, 34, 0, 0, 711, 712, 5, 34, 0, 0, 712, 713, 5, 34, 0, 0, 713, 717, 1, 0, 0, 0, 714, 716, 8, 1, 0, 0, 715, 714, 1, 0, 0, 0, 716, 719, 1, 0, 0, 0, 717, 718, 1, 0, 0, 0, 717, 715, 1, 0, 0, 0, 718, 
720, 1, 0, 0, 0, 719, 717, 1, 0, 0, 0, 720, 721, 5, 34, 0, 0, 721, 722, 5, 34, 0, 0, 722, 723, 5, 34, 0, 0, 723, 725, 1, 0, 0, 0, 724, 726, 5, 34, 0, 0, 725, 724, 1, 0, 0, 0, 725, 726, 1, 0, 0, 0, 726, 728, 1, 0, 0, 0, 727, 729, 5, 34, 0, 0, 728, 727, 1, 0, 0, 0, 728, 729, 1, 0, 0, 0, 729, 731, 1, 0, 0, 0, 730, 701, 1, 0, 0, 0, 730, 710, 1, 0, 0, 0, 731, 103, 1, 0, 0, 0, 732, 734, 3, 82, 33, 0, 733, 732, 1, 0, 0, 0, 734, 735, 1, 0, 0, 0, 735, 733, 1, 0, 0, 0, 735, 736, 1, 0, 0, 0, 736, 105, 1, 0, 0, 0, 737, 739, 3, 82, 33, 0, 738, 737, 1, 0, 0, 0, 739, 740, 1, 0, 0, 0, 740, 738, 1, 0, 0, 0, 740, 741, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 746, 3, 122, 53, 0, 743, 745, 3, 82, 33, 0, 744, 743, 1, 0, 0, 0, 745, 748, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 746, 747, 1, 0, 0, 0, 747, 780, 1, 0, 0, 0, 748, 746, 1, 0, 0, 0, 749, 751, 3, 122, 53, 0, 750, 752, 3, 82, 33, 0, 751, 750, 1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 780, 1, 0, 0, 0, 755, 757, 3, 82, 33, 0, 756, 755, 1, 0, 0, 0, 757, 758, 1, 0, 0, 0, 758, 756, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 767, 1, 0, 0, 0, 760, 764, 3, 122, 53, 0, 761, 763, 3, 82, 33, 0, 762, 761, 1, 0, 0, 0, 763, 766, 1, 0, 0, 0, 764, 762, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 768, 1, 0, 0, 0, 766, 764, 1, 0, 0, 0, 767, 760, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 769, 1, 0, 0, 0, 769, 770, 3, 90, 37, 0, 770, 780, 1, 0, 0, 0, 771, 773, 3, 122, 53, 0, 772, 774, 3, 82, 33, 0, 773, 772, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 773, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 777, 1, 0, 0, 0, 777, 778, 3, 90, 37, 0, 778, 780, 1, 0, 0, 0, 779, 738, 1, 0, 0, 0, 779, 749, 1, 0, 0, 0, 779, 756, 1, 0, 0, 0, 779, 771, 1, 0, 0, 0, 780, 107, 1, 0, 0, 0, 781, 782, 5, 98, 0, 0, 782, 783, 5, 121, 0, 0, 783, 109, 1, 0, 0, 0, 784, 785, 5, 97, 0, 0, 785, 786, 5, 110, 0, 0, 786, 787, 5, 100, 0, 0, 787, 111, 1, 0, 0, 0, 788, 789, 5, 97, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 99, 0, 0, 791, 113, 1, 0, 0, 0, 792, 793, 5, 61, 0, 0, 793, 115, 1, 0, 0, 0, 794, 795, 5, 58, 0, 0, 795, 796, 5, 58, 0, 0, 796, 117, 1, 0, 0, 0, 797, 798, 5, 44, 0, 0, 798, 119, 1, 0, 0, 0, 799, 800, 5, 100, 0, 0, 800, 801, 5, 101, 0, 0, 801, 802, 5, 115, 0, 0, 802, 803, 5, 99, 0, 0, 803, 121, 1, 0, 0, 0, 804, 805, 5, 46, 0, 0, 805, 123, 1, 0, 0, 0, 806, 807, 5, 102, 0, 0, 807, 808, 5, 97, 0, 0, 808, 809, 5, 108, 0, 0, 809, 810, 5, 115, 0, 0, 810, 811, 5, 101, 0, 0, 811, 125, 1, 0, 0, 0, 812, 813, 5, 102, 0, 0, 813, 814, 5, 105, 0, 0, 814, 815, 5, 114, 0, 0, 815, 816, 5, 115, 0, 0, 816, 817, 5, 116, 0, 0, 817, 127, 1, 0, 0, 0, 818, 819, 5, 105, 0, 0, 819, 820, 5, 110, 0, 0, 820, 129, 1, 0, 0, 0, 821, 822, 5, 105, 0, 0, 822, 823, 5, 115, 0, 0, 823, 131, 1, 0, 0, 0, 824, 825, 5, 108, 0, 0, 825, 826, 5, 97, 0, 0, 826, 827, 5, 115, 0, 0, 827, 828, 5, 116, 0, 0, 828, 133, 1, 0, 0, 0, 829, 830, 5, 108, 0, 0, 830, 831, 5, 105, 0, 0, 831, 832, 5, 107, 0, 0, 832, 833, 5, 101, 0, 0, 833, 135, 1, 0, 0, 0, 834, 835, 5, 40, 0, 0, 835, 137, 1, 0, 0, 0, 836, 837, 5, 109, 0, 0, 837, 838, 5, 97, 0, 0, 838, 839, 5, 116, 0, 0, 839, 840, 5, 99, 0, 0, 840, 841, 5, 104, 0, 0, 841, 139, 1, 0, 0, 0, 842, 843, 5, 110, 0, 0, 843, 844, 5, 111, 0, 0, 844, 845, 5, 116, 0, 0, 845, 141, 1, 0, 0, 0, 846, 847, 5, 110, 0, 0, 847, 848, 5, 117, 0, 0, 848, 849, 5, 108, 0, 0, 849, 850, 5, 108, 0, 0, 850, 143, 1, 0, 0, 0, 851, 852, 5, 110, 0, 0, 852, 853, 5, 117, 0, 0, 853, 854, 5, 108, 0, 0, 854, 855, 5, 108, 0, 0, 855, 856, 5, 115, 0, 0, 856, 145, 1, 0, 0, 0, 857, 858, 5, 111, 0, 0, 858, 859, 5, 114, 0, 0, 859, 147, 
1, 0, 0, 0, 860, 861, 5, 63, 0, 0, 861, 149, 1, 0, 0, 0, 862, 863, 5, 114, 0, 0, 863, 864, 5, 108, 0, 0, 864, 865, 5, 105, 0, 0, 865, 866, 5, 107, 0, 0, 866, 867, 5, 101, 0, 0, 867, 151, 1, 0, 0, 0, 868, 869, 5, 41, 0, 0, 869, 153, 1, 0, 0, 0, 870, 871, 5, 116, 0, 0, 871, 872, 5, 114, 0, 0, 872, 873, 5, 117, 0, 0, 873, 874, 5, 101, 0, 0, 874, 155, 1, 0, 0, 0, 875, 876, 5, 61, 0, 0, 876, 877, 5, 61, 0, 0, 877, 157, 1, 0, 0, 0, 878, 879, 5, 61, 0, 0, 879, 880, 5, 126, 0, 0, 880, 159, 1, 0, 0, 0, 881, 882, 5, 33, 0, 0, 882, 883, 5, 61, 0, 0, 883, 161, 1, 0, 0, 0, 884, 885, 5, 60, 0, 0, 885, 163, 1, 0, 0, 0, 886, 887, 5, 60, 0, 0, 887, 888, 5, 61, 0, 0, 888, 165, 1, 0, 0, 0, 889, 890, 5, 62, 0, 0, 890, 167, 1, 0, 0, 0, 891, 892, 5, 62, 0, 0, 892, 893, 5, 61, 0, 0, 893, 169, 1, 0, 0, 0, 894, 895, 5, 43, 0, 0, 895, 171, 1, 0, 0, 0, 896, 897, 5, 45, 0, 0, 897, 173, 1, 0, 0, 0, 898, 899, 5, 42, 0, 0, 899, 175, 1, 0, 0, 0, 900, 901, 5, 47, 0, 0, 901, 177, 1, 0, 0, 0, 902, 903, 5, 37, 0, 0, 903, 179, 1, 0, 0, 0, 904, 905, 3, 148, 66, 0, 905, 909, 3, 84, 34, 0, 906, 908, 3, 100, 42, 0, 907, 906, 1, 0, 0, 0, 908, 911, 1, 0, 0, 0, 909, 907, 1, 0, 0, 0, 909, 910, 1, 0, 0, 0, 910, 919, 1, 0, 0, 0, 911, 909, 1, 0, 0, 0, 912, 914, 3, 148, 66, 0, 913, 915, 3, 82, 33, 0, 914, 913, 1, 0, 0, 0, 915, 916, 1, 0, 0, 0, 916, 914, 1, 0, 0, 0, 916, 917, 1, 0, 0, 0, 917, 919, 1, 0, 0, 0, 918, 904, 1, 0, 0, 0, 918, 912, 1, 0, 0, 0, 919, 181, 1, 0, 0, 0, 920, 921, 5, 91, 0, 0, 921, 922, 1, 0, 0, 0, 922, 923, 6, 83, 0, 0, 923, 924, 6, 83, 0, 0, 924, 183, 1, 0, 0, 0, 925, 926, 5, 93, 0, 0, 926, 927, 1, 0, 0, 0, 927, 928, 6, 84, 15, 0, 928, 929, 6, 84, 15, 0, 929, 185, 1, 0, 0, 0, 930, 934, 3, 84, 34, 0, 931, 933, 3, 100, 42, 0, 932, 931, 1, 0, 0, 0, 933, 936, 1, 0, 0, 0, 934, 932, 1, 0, 0, 0, 934, 935, 1, 0, 0, 0, 935, 947, 1, 0, 0, 0, 936, 934, 1, 0, 0, 0, 937, 940, 3, 98, 41, 0, 938, 940, 3, 92, 38, 0, 939, 937, 1, 0, 0, 0, 939, 938, 1, 0, 0, 0, 940, 942, 1, 0, 0, 0, 941, 943, 3, 100, 42, 0, 942, 941, 1, 0, 0, 0, 943, 944, 1, 0, 0, 0, 944, 942, 1, 0, 0, 0, 944, 945, 1, 0, 0, 0, 945, 947, 1, 0, 0, 0, 946, 930, 1, 0, 0, 0, 946, 939, 1, 0, 0, 0, 947, 187, 1, 0, 0, 0, 948, 950, 3, 94, 39, 0, 949, 951, 3, 96, 40, 0, 950, 949, 1, 0, 0, 0, 951, 952, 1, 0, 0, 0, 952, 950, 1, 0, 0, 0, 952, 953, 1, 0, 0, 0, 953, 954, 1, 0, 0, 0, 954, 955, 3, 94, 39, 0, 955, 189, 1, 0, 0, 0, 956, 957, 3, 188, 86, 0, 957, 191, 1, 0, 0, 0, 958, 959, 3, 60, 22, 0, 959, 960, 1, 0, 0, 0, 960, 961, 6, 88, 11, 0, 961, 193, 1, 0, 0, 0, 962, 963, 3, 62, 23, 0, 963, 964, 1, 0, 0, 0, 964, 965, 6, 89, 11, 0, 965, 195, 1, 0, 0, 0, 966, 967, 3, 64, 24, 0, 967, 968, 1, 0, 0, 0, 968, 969, 6, 90, 11, 0, 969, 197, 1, 0, 0, 0, 970, 971, 3, 80, 32, 0, 971, 972, 1, 0, 0, 0, 972, 973, 6, 91, 14, 0, 973, 974, 6, 91, 15, 0, 974, 199, 1, 0, 0, 0, 975, 976, 3, 182, 83, 0, 976, 977, 1, 0, 0, 0, 977, 978, 6, 92, 12, 0, 978, 201, 1, 0, 0, 0, 979, 980, 3, 184, 84, 0, 980, 981, 1, 0, 0, 0, 981, 982, 6, 93, 16, 0, 982, 203, 1, 0, 0, 0, 983, 984, 3, 368, 176, 0, 984, 985, 1, 0, 0, 0, 985, 986, 6, 94, 17, 0, 986, 205, 1, 0, 0, 0, 987, 988, 3, 118, 51, 0, 988, 989, 1, 0, 0, 0, 989, 990, 6, 95, 18, 0, 990, 207, 1, 0, 0, 0, 991, 992, 3, 114, 49, 0, 992, 993, 1, 0, 0, 0, 993, 994, 6, 96, 19, 0, 994, 209, 1, 0, 0, 0, 995, 996, 5, 109, 0, 0, 996, 997, 5, 101, 0, 0, 997, 998, 5, 116, 0, 0, 998, 999, 5, 97, 0, 0, 999, 1000, 5, 100, 0, 0, 1000, 1001, 5, 97, 0, 0, 1001, 1002, 5, 116, 0, 0, 1002, 1003, 5, 97, 0, 0, 1003, 211, 1, 0, 0, 0, 1004, 1005, 3, 68, 26, 0, 1005, 1006, 1, 0, 0, 0, 
1006, 1007, 6, 98, 20, 0, 1007, 213, 1, 0, 0, 0, 1008, 1009, 3, 102, 43, 0, 1009, 1010, 1, 0, 0, 0, 1010, 1011, 6, 99, 21, 0, 1011, 215, 1, 0, 0, 0, 1012, 1013, 3, 60, 22, 0, 1013, 1014, 1, 0, 0, 0, 1014, 1015, 6, 100, 11, 0, 1015, 217, 1, 0, 0, 0, 1016, 1017, 3, 62, 23, 0, 1017, 1018, 1, 0, 0, 0, 1018, 1019, 6, 101, 11, 0, 1019, 219, 1, 0, 0, 0, 1020, 1021, 3, 64, 24, 0, 1021, 1022, 1, 0, 0, 0, 1022, 1023, 6, 102, 11, 0, 1023, 221, 1, 0, 0, 0, 1024, 1025, 3, 80, 32, 0, 1025, 1026, 1, 0, 0, 0, 1026, 1027, 6, 103, 14, 0, 1027, 1028, 6, 103, 15, 0, 1028, 223, 1, 0, 0, 0, 1029, 1030, 3, 122, 53, 0, 1030, 1031, 1, 0, 0, 0, 1031, 1032, 6, 104, 22, 0, 1032, 225, 1, 0, 0, 0, 1033, 1034, 3, 118, 51, 0, 1034, 1035, 1, 0, 0, 0, 1035, 1036, 6, 105, 18, 0, 1036, 227, 1, 0, 0, 0, 1037, 1042, 3, 84, 34, 0, 1038, 1042, 3, 82, 33, 0, 1039, 1042, 3, 98, 41, 0, 1040, 1042, 3, 174, 79, 0, 1041, 1037, 1, 0, 0, 0, 1041, 1038, 1, 0, 0, 0, 1041, 1039, 1, 0, 0, 0, 1041, 1040, 1, 0, 0, 0, 1042, 229, 1, 0, 0, 0, 1043, 1046, 3, 84, 34, 0, 1044, 1046, 3, 174, 79, 0, 1045, 1043, 1, 0, 0, 0, 1045, 1044, 1, 0, 0, 0, 1046, 1050, 1, 0, 0, 0, 1047, 1049, 3, 228, 106, 0, 1048, 1047, 1, 0, 0, 0, 1049, 1052, 1, 0, 0, 0, 1050, 1048, 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 1063, 1, 0, 0, 0, 1052, 1050, 1, 0, 0, 0, 1053, 1056, 3, 98, 41, 0, 1054, 1056, 3, 92, 38, 0, 1055, 1053, 1, 0, 0, 0, 1055, 1054, 1, 0, 0, 0, 1056, 1058, 1, 0, 0, 0, 1057, 1059, 3, 228, 106, 0, 1058, 1057, 1, 0, 0, 0, 1059, 1060, 1, 0, 0, 0, 1060, 1058, 1, 0, 0, 0, 1060, 1061, 1, 0, 0, 0, 1061, 1063, 1, 0, 0, 0, 1062, 1045, 1, 0, 0, 0, 1062, 1055, 1, 0, 0, 0, 1063, 231, 1, 0, 0, 0, 1064, 1067, 3, 230, 107, 0, 1065, 1067, 3, 188, 86, 0, 1066, 1064, 1, 0, 0, 0, 1066, 1065, 1, 0, 0, 0, 1067, 1068, 1, 0, 0, 0, 1068, 1066, 1, 0, 0, 0, 1068, 1069, 1, 0, 0, 0, 1069, 233, 1, 0, 0, 0, 1070, 1071, 3, 60, 22, 0, 1071, 1072, 1, 0, 0, 0, 1072, 1073, 6, 109, 11, 0, 1073, 235, 1, 0, 0, 0, 1074, 1075, 3, 62, 23, 0, 1075, 1076, 1, 0, 0, 0, 1076, 1077, 6, 110, 11, 0, 1077, 237, 1, 0, 0, 0, 1078, 1079, 3, 64, 24, 0, 1079, 1080, 1, 0, 0, 0, 1080, 1081, 6, 111, 11, 0, 1081, 239, 1, 0, 0, 0, 1082, 1083, 3, 80, 32, 0, 1083, 1084, 1, 0, 0, 0, 1084, 1085, 6, 112, 14, 0, 1085, 1086, 6, 112, 15, 0, 1086, 241, 1, 0, 0, 0, 1087, 1088, 3, 114, 49, 0, 1088, 1089, 1, 0, 0, 0, 1089, 1090, 6, 113, 19, 0, 1090, 243, 1, 0, 0, 0, 1091, 1092, 3, 118, 51, 0, 1092, 1093, 1, 0, 0, 0, 1093, 1094, 6, 114, 18, 0, 1094, 245, 1, 0, 0, 0, 1095, 1096, 3, 122, 53, 0, 1096, 1097, 1, 0, 0, 0, 1097, 1098, 6, 115, 22, 0, 1098, 247, 1, 0, 0, 0, 1099, 1100, 5, 97, 0, 0, 1100, 1101, 5, 115, 0, 0, 1101, 249, 1, 0, 0, 0, 1102, 1103, 3, 232, 108, 0, 1103, 1104, 1, 0, 0, 0, 1104, 1105, 6, 117, 23, 0, 1105, 251, 1, 0, 0, 0, 1106, 1107, 3, 60, 22, 0, 1107, 1108, 1, 0, 0, 0, 1108, 1109, 6, 118, 11, 0, 1109, 253, 1, 0, 0, 0, 1110, 1111, 3, 62, 23, 0, 1111, 1112, 1, 0, 0, 0, 1112, 1113, 6, 119, 11, 0, 1113, 255, 1, 0, 0, 0, 1114, 1115, 3, 64, 24, 0, 1115, 1116, 1, 0, 0, 0, 1116, 1117, 6, 120, 11, 0, 1117, 257, 1, 0, 0, 0, 1118, 1119, 3, 80, 32, 0, 1119, 1120, 1, 0, 0, 0, 1120, 1121, 6, 121, 14, 0, 1121, 1122, 6, 121, 15, 0, 1122, 259, 1, 0, 0, 0, 1123, 1124, 3, 182, 83, 0, 1124, 1125, 1, 0, 0, 0, 1125, 1126, 6, 122, 12, 0, 1126, 1127, 6, 122, 24, 0, 1127, 261, 1, 0, 0, 0, 1128, 1129, 5, 111, 0, 0, 1129, 1130, 5, 110, 0, 0, 1130, 1131, 1, 0, 0, 0, 1131, 1132, 6, 123, 25, 0, 1132, 263, 1, 0, 0, 0, 1133, 1134, 5, 119, 0, 0, 1134, 1135, 5, 105, 0, 0, 1135, 1136, 5, 116, 0, 0, 1136, 1137, 5, 104, 0, 0, 1137, 1138, 1, 0, 0, 0, 
1138, 1139, 6, 124, 25, 0, 1139, 265, 1, 0, 0, 0, 1140, 1141, 8, 12, 0, 0, 1141, 267, 1, 0, 0, 0, 1142, 1144, 3, 266, 125, 0, 1143, 1142, 1, 0, 0, 0, 1144, 1145, 1, 0, 0, 0, 1145, 1143, 1, 0, 0, 0, 1145, 1146, 1, 0, 0, 0, 1146, 1147, 1, 0, 0, 0, 1147, 1148, 3, 368, 176, 0, 1148, 1150, 1, 0, 0, 0, 1149, 1143, 1, 0, 0, 0, 1149, 1150, 1, 0, 0, 0, 1150, 1152, 1, 0, 0, 0, 1151, 1153, 3, 266, 125, 0, 1152, 1151, 1, 0, 0, 0, 1153, 1154, 1, 0, 0, 0, 1154, 1152, 1, 0, 0, 0, 1154, 1155, 1, 0, 0, 0, 1155, 269, 1, 0, 0, 0, 1156, 1157, 3, 268, 126, 0, 1157, 1158, 1, 0, 0, 0, 1158, 1159, 6, 127, 26, 0, 1159, 271, 1, 0, 0, 0, 1160, 1161, 3, 60, 22, 0, 1161, 1162, 1, 0, 0, 0, 1162, 1163, 6, 128, 11, 0, 1163, 273, 1, 0, 0, 0, 1164, 1165, 3, 62, 23, 0, 1165, 1166, 1, 0, 0, 0, 1166, 1167, 6, 129, 11, 0, 1167, 275, 1, 0, 0, 0, 1168, 1169, 3, 64, 24, 0, 1169, 1170, 1, 0, 0, 0, 1170, 1171, 6, 130, 11, 0, 1171, 277, 1, 0, 0, 0, 1172, 1173, 3, 80, 32, 0, 1173, 1174, 1, 0, 0, 0, 1174, 1175, 6, 131, 14, 0, 1175, 1176, 6, 131, 15, 0, 1176, 1177, 6, 131, 15, 0, 1177, 279, 1, 0, 0, 0, 1178, 1179, 3, 114, 49, 0, 1179, 1180, 1, 0, 0, 0, 1180, 1181, 6, 132, 19, 0, 1181, 281, 1, 0, 0, 0, 1182, 1183, 3, 118, 51, 0, 1183, 1184, 1, 0, 0, 0, 1184, 1185, 6, 133, 18, 0, 1185, 283, 1, 0, 0, 0, 1186, 1187, 3, 122, 53, 0, 1187, 1188, 1, 0, 0, 0, 1188, 1189, 6, 134, 22, 0, 1189, 285, 1, 0, 0, 0, 1190, 1191, 3, 264, 124, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1193, 6, 135, 27, 0, 1193, 287, 1, 0, 0, 0, 1194, 1195, 3, 232, 108, 0, 1195, 1196, 1, 0, 0, 0, 1196, 1197, 6, 136, 23, 0, 1197, 289, 1, 0, 0, 0, 1198, 1199, 3, 190, 87, 0, 1199, 1200, 1, 0, 0, 0, 1200, 1201, 6, 137, 28, 0, 1201, 291, 1, 0, 0, 0, 1202, 1203, 3, 60, 22, 0, 1203, 1204, 1, 0, 0, 0, 1204, 1205, 6, 138, 11, 0, 1205, 293, 1, 0, 0, 0, 1206, 1207, 3, 62, 23, 0, 1207, 1208, 1, 0, 0, 0, 1208, 1209, 6, 139, 11, 0, 1209, 295, 1, 0, 0, 0, 1210, 1211, 3, 64, 24, 0, 1211, 1212, 1, 0, 0, 0, 1212, 1213, 6, 140, 11, 0, 1213, 297, 1, 0, 0, 0, 1214, 1215, 3, 80, 32, 0, 1215, 1216, 1, 0, 0, 0, 1216, 1217, 6, 141, 14, 0, 1217, 1218, 6, 141, 15, 0, 1218, 299, 1, 0, 0, 0, 1219, 1220, 3, 368, 176, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1222, 6, 142, 17, 0, 1222, 301, 1, 0, 0, 0, 1223, 1224, 3, 118, 51, 0, 1224, 1225, 1, 0, 0, 0, 1225, 1226, 6, 143, 18, 0, 1226, 303, 1, 0, 0, 0, 1227, 1228, 3, 122, 53, 0, 1228, 1229, 1, 0, 0, 0, 1229, 1230, 6, 144, 22, 0, 1230, 305, 1, 0, 0, 0, 1231, 1232, 3, 262, 123, 0, 1232, 1233, 1, 0, 0, 0, 1233, 1234, 6, 145, 29, 0, 1234, 1235, 6, 145, 30, 0, 1235, 307, 1, 0, 0, 0, 1236, 1237, 3, 68, 26, 0, 1237, 1238, 1, 0, 0, 0, 1238, 1239, 6, 146, 20, 0, 1239, 309, 1, 0, 0, 0, 1240, 1241, 3, 102, 43, 0, 1241, 1242, 1, 0, 0, 0, 1242, 1243, 6, 147, 21, 0, 1243, 311, 1, 0, 0, 0, 1244, 1245, 3, 60, 22, 0, 1245, 1246, 1, 0, 0, 0, 1246, 1247, 6, 148, 11, 0, 1247, 313, 1, 0, 0, 0, 1248, 1249, 3, 62, 23, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1251, 6, 149, 11, 0, 1251, 315, 1, 0, 0, 0, 1252, 1253, 3, 64, 24, 0, 1253, 1254, 1, 0, 0, 0, 1254, 1255, 6, 150, 11, 0, 1255, 317, 1, 0, 0, 0, 1256, 1257, 3, 80, 32, 0, 1257, 1258, 1, 0, 0, 0, 1258, 1259, 6, 151, 14, 0, 1259, 1260, 6, 151, 15, 0, 1260, 1261, 6, 151, 15, 0, 1261, 319, 1, 0, 0, 0, 1262, 1263, 3, 118, 51, 0, 1263, 1264, 1, 0, 0, 0, 1264, 1265, 6, 152, 18, 0, 1265, 321, 1, 0, 0, 0, 1266, 1267, 3, 122, 53, 0, 1267, 1268, 1, 0, 0, 0, 1268, 1269, 6, 153, 22, 0, 1269, 323, 1, 0, 0, 0, 1270, 1271, 3, 232, 108, 0, 1271, 1272, 1, 0, 0, 0, 1272, 1273, 6, 154, 23, 0, 1273, 325, 1, 0, 0, 0, 1274, 1275, 3, 60, 22, 0, 1275, 1276, 1, 0, 0, 0, 
1276, 1277, 6, 155, 11, 0, 1277, 327, 1, 0, 0, 0, 1278, 1279, 3, 62, 23, 0, 1279, 1280, 1, 0, 0, 0, 1280, 1281, 6, 156, 11, 0, 1281, 329, 1, 0, 0, 0, 1282, 1283, 3, 64, 24, 0, 1283, 1284, 1, 0, 0, 0, 1284, 1285, 6, 157, 11, 0, 1285, 331, 1, 0, 0, 0, 1286, 1287, 3, 80, 32, 0, 1287, 1288, 1, 0, 0, 0, 1288, 1289, 6, 158, 14, 0, 1289, 1290, 6, 158, 15, 0, 1290, 333, 1, 0, 0, 0, 1291, 1292, 3, 122, 53, 0, 1292, 1293, 1, 0, 0, 0, 1293, 1294, 6, 159, 22, 0, 1294, 335, 1, 0, 0, 0, 1295, 1296, 3, 190, 87, 0, 1296, 1297, 1, 0, 0, 0, 1297, 1298, 6, 160, 28, 0, 1298, 337, 1, 0, 0, 0, 1299, 1300, 3, 186, 85, 0, 1300, 1301, 1, 0, 0, 0, 1301, 1302, 6, 161, 31, 0, 1302, 339, 1, 0, 0, 0, 1303, 1304, 3, 60, 22, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1306, 6, 162, 11, 0, 1306, 341, 1, 0, 0, 0, 1307, 1308, 3, 62, 23, 0, 1308, 1309, 1, 0, 0, 0, 1309, 1310, 6, 163, 11, 0, 1310, 343, 1, 0, 0, 0, 1311, 1312, 3, 64, 24, 0, 1312, 1313, 1, 0, 0, 0, 1313, 1314, 6, 164, 11, 0, 1314, 345, 1, 0, 0, 0, 1315, 1316, 3, 80, 32, 0, 1316, 1317, 1, 0, 0, 0, 1317, 1318, 6, 165, 14, 0, 1318, 1319, 6, 165, 15, 0, 1319, 347, 1, 0, 0, 0, 1320, 1321, 5, 105, 0, 0, 1321, 1322, 5, 110, 0, 0, 1322, 1323, 5, 102, 0, 0, 1323, 1324, 5, 111, 0, 0, 1324, 349, 1, 0, 0, 0, 1325, 1326, 3, 60, 22, 0, 1326, 1327, 1, 0, 0, 0, 1327, 1328, 6, 167, 11, 0, 1328, 351, 1, 0, 0, 0, 1329, 1330, 3, 62, 23, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1332, 6, 168, 11, 0, 1332, 353, 1, 0, 0, 0, 1333, 1334, 3, 64, 24, 0, 1334, 1335, 1, 0, 0, 0, 1335, 1336, 6, 169, 11, 0, 1336, 355, 1, 0, 0, 0, 1337, 1338, 3, 80, 32, 0, 1338, 1339, 1, 0, 0, 0, 1339, 1340, 6, 170, 14, 0, 1340, 1341, 6, 170, 15, 0, 1341, 357, 1, 0, 0, 0, 1342, 1343, 5, 102, 0, 0, 1343, 1344, 5, 117, 0, 0, 1344, 1345, 5, 110, 0, 0, 1345, 1346, 5, 99, 0, 0, 1346, 1347, 5, 116, 0, 0, 1347, 1348, 5, 105, 0, 0, 1348, 1349, 5, 111, 0, 0, 1349, 1350, 5, 110, 0, 0, 1350, 1351, 5, 115, 0, 0, 1351, 359, 1, 0, 0, 0, 1352, 1353, 3, 60, 22, 0, 1353, 1354, 1, 0, 0, 0, 1354, 1355, 6, 172, 11, 0, 1355, 361, 1, 0, 0, 0, 1356, 1357, 3, 62, 23, 0, 1357, 1358, 1, 0, 0, 0, 1358, 1359, 6, 173, 11, 0, 1359, 363, 1, 0, 0, 0, 1360, 1361, 3, 64, 24, 0, 1361, 1362, 1, 0, 0, 0, 1362, 1363, 6, 174, 11, 0, 1363, 365, 1, 0, 0, 0, 1364, 1365, 3, 184, 84, 0, 1365, 1366, 1, 0, 0, 0, 1366, 1367, 6, 175, 16, 0, 1367, 1368, 6, 175, 15, 0, 1368, 367, 1, 0, 0, 0, 1369, 1370, 5, 58, 0, 0, 1370, 369, 1, 0, 0, 0, 1371, 1377, 3, 92, 38, 0, 1372, 1377, 3, 82, 33, 0, 1373, 1377, 3, 122, 53, 0, 1374, 1377, 3, 84, 34, 0, 1375, 1377, 3, 98, 41, 0, 1376, 1371, 1, 0, 0, 0, 1376, 1372, 1, 0, 0, 0, 1376, 1373, 1, 0, 0, 0, 1376, 1374, 1, 0, 0, 0, 1376, 1375, 1, 0, 0, 0, 1377, 1378, 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 371, 1, 0, 0, 0, 1380, 1381, 3, 60, 22, 0, 1381, 1382, 1, 0, 0, 0, 1382, 1383, 6, 178, 11, 0, 1383, 373, 1, 0, 0, 0, 1384, 1385, 3, 62, 23, 0, 1385, 1386, 1, 0, 0, 0, 1386, 1387, 6, 179, 11, 0, 1387, 375, 1, 0, 0, 0, 1388, 1389, 3, 64, 24, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 6, 180, 11, 0, 1391, 377, 1, 0, 0, 0, 1392, 1393, 3, 80, 32, 0, 1393, 1394, 1, 0, 0, 0, 1394, 1395, 6, 181, 14, 0, 1395, 1396, 6, 181, 15, 0, 1396, 379, 1, 0, 0, 0, 1397, 1398, 3, 68, 26, 0, 1398, 1399, 1, 0, 0, 0, 1399, 1400, 6, 182, 20, 0, 1400, 1401, 6, 182, 15, 0, 1401, 1402, 6, 182, 32, 0, 1402, 381, 1, 0, 0, 0, 1403, 1404, 3, 102, 43, 0, 1404, 1405, 1, 0, 0, 0, 1405, 1406, 6, 183, 21, 0, 1406, 1407, 6, 183, 15, 0, 1407, 1408, 6, 183, 32, 0, 1408, 383, 1, 0, 0, 0, 1409, 1410, 3, 60, 22, 0, 1410, 1411, 1, 0, 0, 0, 1411, 1412, 6, 184, 11, 0, 
1412, 385, 1, 0, 0, 0, 1413, 1414, 3, 62, 23, 0, 1414, 1415, 1, 0, 0, 0, 1415, 1416, 6, 185, 11, 0, 1416, 387, 1, 0, 0, 0, 1417, 1418, 3, 64, 24, 0, 1418, 1419, 1, 0, 0, 0, 1419, 1420, 6, 186, 11, 0, 1420, 389, 1, 0, 0, 0, 1421, 1422, 3, 368, 176, 0, 1422, 1423, 1, 0, 0, 0, 1423, 1424, 6, 187, 17, 0, 1424, 1425, 6, 187, 15, 0, 1425, 1426, 6, 187, 7, 0, 1426, 391, 1, 0, 0, 0, 1427, 1428, 3, 118, 51, 0, 1428, 1429, 1, 0, 0, 0, 1429, 1430, 6, 188, 18, 0, 1430, 1431, 6, 188, 15, 0, 1431, 1432, 6, 188, 7, 0, 1432, 393, 1, 0, 0, 0, 1433, 1434, 3, 60, 22, 0, 1434, 1435, 1, 0, 0, 0, 1435, 1436, 6, 189, 11, 0, 1436, 395, 1, 0, 0, 0, 1437, 1438, 3, 62, 23, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 6, 190, 11, 0, 1440, 397, 1, 0, 0, 0, 1441, 1442, 3, 64, 24, 0, 1442, 1443, 1, 0, 0, 0, 1443, 1444, 6, 191, 11, 0, 1444, 399, 1, 0, 0, 0, 1445, 1446, 3, 190, 87, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1448, 6, 192, 15, 0, 1448, 1449, 6, 192, 0, 0, 1449, 1450, 6, 192, 28, 0, 1450, 401, 1, 0, 0, 0, 1451, 1452, 3, 186, 85, 0, 1452, 1453, 1, 0, 0, 0, 1453, 1454, 6, 193, 15, 0, 1454, 1455, 6, 193, 0, 0, 1455, 1456, 6, 193, 31, 0, 1456, 403, 1, 0, 0, 0, 1457, 1458, 3, 108, 46, 0, 1458, 1459, 1, 0, 0, 0, 1459, 1460, 6, 194, 15, 0, 1460, 1461, 6, 194, 0, 0, 1461, 1462, 6, 194, 33, 0, 1462, 405, 1, 0, 0, 0, 1463, 1464, 3, 80, 32, 0, 1464, 1465, 1, 0, 0, 0, 1465, 1466, 6, 195, 14, 0, 1466, 1467, 6, 195, 15, 0, 1467, 407, 1, 0, 0, 0, 65, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 588, 598, 602, 605, 614, 616, 627, 634, 639, 678, 683, 692, 699, 704, 706, 717, 725, 728, 730, 735, 740, 746, 753, 758, 764, 767, 775, 779, 909, 916, 918, 934, 939, 944, 946, 952, 1041, 1045, 1050, 1055, 1060, 1062, 1066, 1068, 1145, 1149, 1154, 1376, 1378, 34, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 71, 0, 5, 0, 0, 7, 30, 0, 4, 0, 0, 7, 72, 0, 7, 116, 0, 7, 39, 0, 7, 37, 0, 7, 26, 0, 7, 31, 0, 7, 41, 0, 7, 82, 0, 5, 13, 0, 5, 7, 0, 7, 92, 0, 7, 91, 0, 7, 74, 0, 7, 90, 0, 5, 9, 0, 7, 73, 0, 5, 15, 0, 7, 34, 0] \ No newline at end of file +[4, 0, 126, 1471, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 
2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 4, 21, 587, 8, 21, 11, 21, 12, 21, 588, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 597, 8, 22, 10, 22, 12, 22, 600, 9, 22, 1, 22, 3, 22, 603, 8, 22, 1, 22, 3, 22, 606, 8, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 615, 8, 23, 10, 23, 12, 23, 618, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 4, 24, 626, 8, 24, 11, 24, 12, 24, 627, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 3, 25, 635, 8, 25, 1, 26, 4, 26, 638, 8, 26, 11, 26, 12, 26, 639, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 1, 37, 3, 37, 679, 8, 37, 1, 37, 4, 37, 682, 8, 37, 11, 37, 12, 37, 683, 1, 38, 1, 38, 1, 
39, 1, 39, 1, 40, 1, 40, 1, 40, 3, 40, 693, 8, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 3, 42, 700, 8, 42, 1, 43, 1, 43, 1, 43, 5, 43, 705, 8, 43, 10, 43, 12, 43, 708, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 716, 8, 43, 10, 43, 12, 43, 719, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 3, 43, 726, 8, 43, 1, 43, 3, 43, 729, 8, 43, 3, 43, 731, 8, 43, 1, 44, 4, 44, 734, 8, 44, 11, 44, 12, 44, 735, 1, 45, 4, 45, 739, 8, 45, 11, 45, 12, 45, 740, 1, 45, 1, 45, 5, 45, 745, 8, 45, 10, 45, 12, 45, 748, 9, 45, 1, 45, 1, 45, 4, 45, 752, 8, 45, 11, 45, 12, 45, 753, 1, 45, 4, 45, 757, 8, 45, 11, 45, 12, 45, 758, 1, 45, 1, 45, 5, 45, 763, 8, 45, 10, 45, 12, 45, 766, 9, 45, 3, 45, 768, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 4, 45, 774, 8, 45, 11, 45, 12, 45, 775, 1, 45, 1, 45, 3, 45, 780, 8, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 3, 82, 908, 8, 82, 1, 82, 5, 82, 911, 8, 82, 10, 82, 12, 82, 914, 9, 82, 1, 82, 1, 82, 4, 82, 918, 8, 82, 11, 82, 12, 82, 919, 3, 82, 922, 8, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 5, 85, 936, 8, 85, 10, 85, 12, 85, 939, 9, 85, 1, 85, 1, 85, 3, 85, 943, 8, 85, 1, 85, 4, 85, 946, 8, 85, 11, 85, 12, 85, 947, 3, 85, 950, 8, 85, 1, 86, 1, 86, 4, 86, 954, 8, 86, 11, 86, 12, 86, 955, 1, 86, 1, 86, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1045, 8, 106, 1, 107, 1, 107, 3, 107, 1049, 8, 107, 1, 107, 5, 107, 1052, 8, 107, 10, 107, 12, 107, 1055, 9, 107, 1, 107, 1, 107, 3, 107, 1059, 8, 107, 1, 107, 4, 107, 1062, 8, 107, 11, 107, 12, 107, 1063, 3, 107, 1066, 8, 107, 1, 108, 1, 108, 4, 108, 1070, 8, 108, 11, 108, 12, 108, 1071, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 
124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 126, 4, 126, 1147, 8, 126, 11, 126, 12, 126, 1148, 1, 126, 1, 126, 3, 126, 1153, 8, 126, 1, 126, 4, 126, 1156, 8, 126, 11, 126, 12, 126, 1157, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 4, 177, 1380, 8, 177, 11, 177, 12, 177, 1381, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 2, 616, 717, 0, 196, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 25, 66, 0, 68, 26, 70, 0, 72, 0, 74, 27, 76, 28, 78, 29, 80, 30, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 0, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 72, 186, 73, 188, 0, 190, 74, 192, 75, 194, 76, 196, 77, 198, 0, 200, 0, 202, 0, 204, 0, 206, 0, 208, 0, 210, 
78, 212, 0, 214, 0, 216, 79, 218, 80, 220, 81, 222, 0, 224, 0, 226, 0, 228, 0, 230, 0, 232, 82, 234, 83, 236, 84, 238, 85, 240, 0, 242, 0, 244, 0, 246, 0, 248, 86, 250, 0, 252, 87, 254, 88, 256, 89, 258, 0, 260, 0, 262, 90, 264, 91, 266, 0, 268, 92, 270, 0, 272, 93, 274, 94, 276, 95, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 0, 290, 0, 292, 96, 294, 97, 296, 98, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 0, 310, 0, 312, 99, 314, 100, 316, 101, 318, 0, 320, 0, 322, 0, 324, 0, 326, 102, 328, 103, 330, 104, 332, 0, 334, 0, 336, 0, 338, 0, 340, 105, 342, 106, 344, 107, 346, 0, 348, 108, 350, 109, 352, 110, 354, 111, 356, 0, 358, 112, 360, 113, 362, 114, 364, 115, 366, 0, 368, 116, 370, 117, 372, 118, 374, 119, 376, 120, 378, 0, 380, 0, 382, 0, 384, 121, 386, 122, 388, 123, 390, 0, 392, 0, 394, 124, 396, 125, 398, 126, 400, 0, 402, 0, 404, 0, 406, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1498, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 1, 78, 1, 0, 0, 0, 2, 80, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 184, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 2, 194, 1, 0, 0, 0, 2, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 216, 1, 0, 0, 0, 3, 218, 1, 0, 0, 0, 3, 220, 1, 0, 0, 0, 4, 222, 1, 0, 0, 0, 4, 224, 1, 0, 0, 0, 4, 226, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 4, 236, 1, 0, 0, 0, 4, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 5, 254, 1, 0, 0, 0, 5, 256, 1, 0, 0, 0, 6, 258, 1, 
0, 0, 0, 6, 260, 1, 0, 0, 0, 6, 262, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 6, 274, 1, 0, 0, 0, 6, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 7, 294, 1, 0, 0, 0, 7, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 8, 310, 1, 0, 0, 0, 8, 312, 1, 0, 0, 0, 8, 314, 1, 0, 0, 0, 8, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 9, 324, 1, 0, 0, 0, 9, 326, 1, 0, 0, 0, 9, 328, 1, 0, 0, 0, 9, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 10, 338, 1, 0, 0, 0, 10, 340, 1, 0, 0, 0, 10, 342, 1, 0, 0, 0, 10, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 11, 348, 1, 0, 0, 0, 11, 350, 1, 0, 0, 0, 11, 352, 1, 0, 0, 0, 11, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 12, 358, 1, 0, 0, 0, 12, 360, 1, 0, 0, 0, 12, 362, 1, 0, 0, 0, 12, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 13, 370, 1, 0, 0, 0, 13, 372, 1, 0, 0, 0, 13, 374, 1, 0, 0, 0, 13, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 14, 380, 1, 0, 0, 0, 14, 382, 1, 0, 0, 0, 14, 384, 1, 0, 0, 0, 14, 386, 1, 0, 0, 0, 14, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 15, 396, 1, 0, 0, 0, 15, 398, 1, 0, 0, 0, 15, 400, 1, 0, 0, 0, 15, 402, 1, 0, 0, 0, 15, 404, 1, 0, 0, 0, 15, 406, 1, 0, 0, 0, 16, 408, 1, 0, 0, 0, 18, 418, 1, 0, 0, 0, 20, 425, 1, 0, 0, 0, 22, 434, 1, 0, 0, 0, 24, 441, 1, 0, 0, 0, 26, 451, 1, 0, 0, 0, 28, 458, 1, 0, 0, 0, 30, 465, 1, 0, 0, 0, 32, 479, 1, 0, 0, 0, 34, 486, 1, 0, 0, 0, 36, 494, 1, 0, 0, 0, 38, 503, 1, 0, 0, 0, 40, 510, 1, 0, 0, 0, 42, 520, 1, 0, 0, 0, 44, 532, 1, 0, 0, 0, 46, 541, 1, 0, 0, 0, 48, 547, 1, 0, 0, 0, 50, 554, 1, 0, 0, 0, 52, 561, 1, 0, 0, 0, 54, 569, 1, 0, 0, 0, 56, 577, 1, 0, 0, 0, 58, 586, 1, 0, 0, 0, 60, 592, 1, 0, 0, 0, 62, 609, 1, 0, 0, 0, 64, 625, 1, 0, 0, 0, 66, 634, 1, 0, 0, 0, 68, 637, 1, 0, 0, 0, 70, 641, 1, 0, 0, 0, 72, 646, 1, 0, 0, 0, 74, 651, 1, 0, 0, 0, 76, 655, 1, 0, 0, 0, 78, 659, 1, 0, 0, 0, 80, 663, 1, 0, 0, 0, 82, 667, 1, 0, 0, 0, 84, 669, 1, 0, 0, 0, 86, 671, 1, 0, 0, 0, 88, 674, 1, 0, 0, 0, 90, 676, 1, 0, 0, 0, 92, 685, 1, 0, 0, 0, 94, 687, 1, 0, 0, 0, 96, 692, 1, 0, 0, 0, 98, 694, 1, 0, 0, 0, 100, 699, 1, 0, 0, 0, 102, 730, 1, 0, 0, 0, 104, 733, 1, 0, 0, 0, 106, 779, 1, 0, 0, 0, 108, 781, 1, 0, 0, 0, 110, 784, 1, 0, 0, 0, 112, 788, 1, 0, 0, 0, 114, 792, 1, 0, 0, 0, 116, 794, 1, 0, 0, 0, 118, 797, 1, 0, 0, 0, 120, 799, 1, 0, 0, 0, 122, 804, 1, 0, 0, 0, 124, 806, 1, 0, 0, 0, 126, 812, 1, 0, 0, 0, 128, 818, 1, 0, 0, 0, 130, 821, 1, 0, 0, 0, 132, 824, 1, 0, 0, 0, 134, 829, 1, 0, 0, 0, 136, 834, 1, 0, 0, 0, 138, 836, 1, 0, 0, 0, 140, 842, 1, 0, 0, 0, 142, 846, 1, 0, 0, 0, 144, 851, 1, 0, 0, 0, 146, 857, 1, 0, 0, 0, 148, 860, 1, 0, 0, 0, 150, 862, 1, 0, 0, 0, 152, 868, 1, 0, 0, 0, 154, 870, 1, 0, 0, 0, 156, 875, 1, 0, 0, 0, 158, 878, 1, 0, 0, 0, 160, 881, 1, 0, 0, 0, 162, 884, 1, 0, 0, 0, 164, 886, 1, 0, 0, 0, 166, 889, 1, 0, 0, 0, 168, 891, 1, 0, 0, 0, 170, 894, 1, 0, 0, 0, 172, 896, 1, 0, 0, 0, 174, 898, 1, 0, 0, 0, 176, 900, 1, 0, 0, 0, 178, 902, 1, 0, 0, 0, 180, 921, 1, 0, 0, 0, 182, 923, 1, 0, 0, 0, 184, 928, 1, 0, 0, 0, 186, 949, 1, 0, 0, 0, 188, 951, 1, 0, 0, 0, 190, 959, 1, 0, 0, 0, 192, 961, 1, 0, 0, 0, 194, 965, 1, 0, 0, 0, 196, 969, 1, 0, 0, 0, 198, 973, 1, 0, 0, 0, 200, 978, 1, 0, 0, 0, 202, 982, 1, 0, 0, 0, 204, 986, 1, 0, 0, 0, 
206, 990, 1, 0, 0, 0, 208, 994, 1, 0, 0, 0, 210, 998, 1, 0, 0, 0, 212, 1007, 1, 0, 0, 0, 214, 1011, 1, 0, 0, 0, 216, 1015, 1, 0, 0, 0, 218, 1019, 1, 0, 0, 0, 220, 1023, 1, 0, 0, 0, 222, 1027, 1, 0, 0, 0, 224, 1032, 1, 0, 0, 0, 226, 1036, 1, 0, 0, 0, 228, 1044, 1, 0, 0, 0, 230, 1065, 1, 0, 0, 0, 232, 1069, 1, 0, 0, 0, 234, 1073, 1, 0, 0, 0, 236, 1077, 1, 0, 0, 0, 238, 1081, 1, 0, 0, 0, 240, 1085, 1, 0, 0, 0, 242, 1090, 1, 0, 0, 0, 244, 1094, 1, 0, 0, 0, 246, 1098, 1, 0, 0, 0, 248, 1102, 1, 0, 0, 0, 250, 1105, 1, 0, 0, 0, 252, 1109, 1, 0, 0, 0, 254, 1113, 1, 0, 0, 0, 256, 1117, 1, 0, 0, 0, 258, 1121, 1, 0, 0, 0, 260, 1126, 1, 0, 0, 0, 262, 1131, 1, 0, 0, 0, 264, 1136, 1, 0, 0, 0, 266, 1143, 1, 0, 0, 0, 268, 1152, 1, 0, 0, 0, 270, 1159, 1, 0, 0, 0, 272, 1163, 1, 0, 0, 0, 274, 1167, 1, 0, 0, 0, 276, 1171, 1, 0, 0, 0, 278, 1175, 1, 0, 0, 0, 280, 1181, 1, 0, 0, 0, 282, 1185, 1, 0, 0, 0, 284, 1189, 1, 0, 0, 0, 286, 1193, 1, 0, 0, 0, 288, 1197, 1, 0, 0, 0, 290, 1201, 1, 0, 0, 0, 292, 1205, 1, 0, 0, 0, 294, 1209, 1, 0, 0, 0, 296, 1213, 1, 0, 0, 0, 298, 1217, 1, 0, 0, 0, 300, 1222, 1, 0, 0, 0, 302, 1226, 1, 0, 0, 0, 304, 1230, 1, 0, 0, 0, 306, 1234, 1, 0, 0, 0, 308, 1239, 1, 0, 0, 0, 310, 1243, 1, 0, 0, 0, 312, 1247, 1, 0, 0, 0, 314, 1251, 1, 0, 0, 0, 316, 1255, 1, 0, 0, 0, 318, 1259, 1, 0, 0, 0, 320, 1265, 1, 0, 0, 0, 322, 1269, 1, 0, 0, 0, 324, 1273, 1, 0, 0, 0, 326, 1277, 1, 0, 0, 0, 328, 1281, 1, 0, 0, 0, 330, 1285, 1, 0, 0, 0, 332, 1289, 1, 0, 0, 0, 334, 1294, 1, 0, 0, 0, 336, 1298, 1, 0, 0, 0, 338, 1302, 1, 0, 0, 0, 340, 1306, 1, 0, 0, 0, 342, 1310, 1, 0, 0, 0, 344, 1314, 1, 0, 0, 0, 346, 1318, 1, 0, 0, 0, 348, 1323, 1, 0, 0, 0, 350, 1328, 1, 0, 0, 0, 352, 1332, 1, 0, 0, 0, 354, 1336, 1, 0, 0, 0, 356, 1340, 1, 0, 0, 0, 358, 1345, 1, 0, 0, 0, 360, 1355, 1, 0, 0, 0, 362, 1359, 1, 0, 0, 0, 364, 1363, 1, 0, 0, 0, 366, 1367, 1, 0, 0, 0, 368, 1372, 1, 0, 0, 0, 370, 1379, 1, 0, 0, 0, 372, 1383, 1, 0, 0, 0, 374, 1387, 1, 0, 0, 0, 376, 1391, 1, 0, 0, 0, 378, 1395, 1, 0, 0, 0, 380, 1400, 1, 0, 0, 0, 382, 1406, 1, 0, 0, 0, 384, 1412, 1, 0, 0, 0, 386, 1416, 1, 0, 0, 0, 388, 1420, 1, 0, 0, 0, 390, 1424, 1, 0, 0, 0, 392, 1430, 1, 0, 0, 0, 394, 1436, 1, 0, 0, 0, 396, 1440, 1, 0, 0, 0, 398, 1444, 1, 0, 0, 0, 400, 1448, 1, 0, 0, 0, 402, 1454, 1, 0, 0, 0, 404, 1460, 1, 0, 0, 0, 406, 1466, 1, 0, 0, 0, 408, 409, 5, 100, 0, 0, 409, 410, 5, 105, 0, 0, 410, 411, 5, 115, 0, 0, 411, 412, 5, 115, 0, 0, 412, 413, 5, 101, 0, 0, 413, 414, 5, 99, 0, 0, 414, 415, 5, 116, 0, 0, 415, 416, 1, 0, 0, 0, 416, 417, 6, 0, 0, 0, 417, 17, 1, 0, 0, 0, 418, 419, 5, 100, 0, 0, 419, 420, 5, 114, 0, 0, 420, 421, 5, 111, 0, 0, 421, 422, 5, 112, 0, 0, 422, 423, 1, 0, 0, 0, 423, 424, 6, 1, 1, 0, 424, 19, 1, 0, 0, 0, 425, 426, 5, 101, 0, 0, 426, 427, 5, 110, 0, 0, 427, 428, 5, 114, 0, 0, 428, 429, 5, 105, 0, 0, 429, 430, 5, 99, 0, 0, 430, 431, 5, 104, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 6, 2, 2, 0, 433, 21, 1, 0, 0, 0, 434, 435, 5, 101, 0, 0, 435, 436, 5, 118, 0, 0, 436, 437, 5, 97, 0, 0, 437, 438, 5, 108, 0, 0, 438, 439, 1, 0, 0, 0, 439, 440, 6, 3, 0, 0, 440, 23, 1, 0, 0, 0, 441, 442, 5, 101, 0, 0, 442, 443, 5, 120, 0, 0, 443, 444, 5, 112, 0, 0, 444, 445, 5, 108, 0, 0, 445, 446, 5, 97, 0, 0, 446, 447, 5, 105, 0, 0, 447, 448, 5, 110, 0, 0, 448, 449, 1, 0, 0, 0, 449, 450, 6, 4, 3, 0, 450, 25, 1, 0, 0, 0, 451, 452, 5, 102, 0, 0, 452, 453, 5, 114, 0, 0, 453, 454, 5, 111, 0, 0, 454, 455, 5, 109, 0, 0, 455, 456, 1, 0, 0, 0, 456, 457, 6, 5, 4, 0, 457, 27, 1, 0, 0, 0, 458, 459, 5, 103, 0, 0, 459, 460, 5, 114, 0, 0, 460, 461, 5, 111, 0, 0, 461, 
462, 5, 107, 0, 0, 462, 463, 1, 0, 0, 0, 463, 464, 6, 6, 0, 0, 464, 29, 1, 0, 0, 0, 465, 466, 5, 105, 0, 0, 466, 467, 5, 110, 0, 0, 467, 468, 5, 108, 0, 0, 468, 469, 5, 105, 0, 0, 469, 470, 5, 110, 0, 0, 470, 471, 5, 101, 0, 0, 471, 472, 5, 115, 0, 0, 472, 473, 5, 116, 0, 0, 473, 474, 5, 97, 0, 0, 474, 475, 5, 116, 0, 0, 475, 476, 5, 115, 0, 0, 476, 477, 1, 0, 0, 0, 477, 478, 6, 7, 0, 0, 478, 31, 1, 0, 0, 0, 479, 480, 5, 107, 0, 0, 480, 481, 5, 101, 0, 0, 481, 482, 5, 101, 0, 0, 482, 483, 5, 112, 0, 0, 483, 484, 1, 0, 0, 0, 484, 485, 6, 8, 1, 0, 485, 33, 1, 0, 0, 0, 486, 487, 5, 108, 0, 0, 487, 488, 5, 105, 0, 0, 488, 489, 5, 109, 0, 0, 489, 490, 5, 105, 0, 0, 490, 491, 5, 116, 0, 0, 491, 492, 1, 0, 0, 0, 492, 493, 6, 9, 0, 0, 493, 35, 1, 0, 0, 0, 494, 495, 5, 108, 0, 0, 495, 496, 5, 111, 0, 0, 496, 497, 5, 111, 0, 0, 497, 498, 5, 107, 0, 0, 498, 499, 5, 117, 0, 0, 499, 500, 5, 112, 0, 0, 500, 501, 1, 0, 0, 0, 501, 502, 6, 10, 5, 0, 502, 37, 1, 0, 0, 0, 503, 504, 5, 109, 0, 0, 504, 505, 5, 101, 0, 0, 505, 506, 5, 116, 0, 0, 506, 507, 5, 97, 0, 0, 507, 508, 1, 0, 0, 0, 508, 509, 6, 11, 6, 0, 509, 39, 1, 0, 0, 0, 510, 511, 5, 109, 0, 0, 511, 512, 5, 101, 0, 0, 512, 513, 5, 116, 0, 0, 513, 514, 5, 114, 0, 0, 514, 515, 5, 105, 0, 0, 515, 516, 5, 99, 0, 0, 516, 517, 5, 115, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 6, 12, 7, 0, 519, 41, 1, 0, 0, 0, 520, 521, 5, 109, 0, 0, 521, 522, 5, 118, 0, 0, 522, 523, 5, 95, 0, 0, 523, 524, 5, 101, 0, 0, 524, 525, 5, 120, 0, 0, 525, 526, 5, 112, 0, 0, 526, 527, 5, 97, 0, 0, 527, 528, 5, 110, 0, 0, 528, 529, 5, 100, 0, 0, 529, 530, 1, 0, 0, 0, 530, 531, 6, 13, 8, 0, 531, 43, 1, 0, 0, 0, 532, 533, 5, 114, 0, 0, 533, 534, 5, 101, 0, 0, 534, 535, 5, 110, 0, 0, 535, 536, 5, 97, 0, 0, 536, 537, 5, 109, 0, 0, 537, 538, 5, 101, 0, 0, 538, 539, 1, 0, 0, 0, 539, 540, 6, 14, 9, 0, 540, 45, 1, 0, 0, 0, 541, 542, 5, 114, 0, 0, 542, 543, 5, 111, 0, 0, 543, 544, 5, 119, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 6, 15, 0, 0, 546, 47, 1, 0, 0, 0, 547, 548, 5, 115, 0, 0, 548, 549, 5, 104, 0, 0, 549, 550, 5, 111, 0, 0, 550, 551, 5, 119, 0, 0, 551, 552, 1, 0, 0, 0, 552, 553, 6, 16, 10, 0, 553, 49, 1, 0, 0, 0, 554, 555, 5, 115, 0, 0, 555, 556, 5, 111, 0, 0, 556, 557, 5, 114, 0, 0, 557, 558, 5, 116, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 6, 17, 0, 0, 560, 51, 1, 0, 0, 0, 561, 562, 5, 115, 0, 0, 562, 563, 5, 116, 0, 0, 563, 564, 5, 97, 0, 0, 564, 565, 5, 116, 0, 0, 565, 566, 5, 115, 0, 0, 566, 567, 1, 0, 0, 0, 567, 568, 6, 18, 0, 0, 568, 53, 1, 0, 0, 0, 569, 570, 5, 119, 0, 0, 570, 571, 5, 104, 0, 0, 571, 572, 5, 101, 0, 0, 572, 573, 5, 114, 0, 0, 573, 574, 5, 101, 0, 0, 574, 575, 1, 0, 0, 0, 575, 576, 6, 19, 0, 0, 576, 55, 1, 0, 0, 0, 577, 578, 5, 109, 0, 0, 578, 579, 5, 97, 0, 0, 579, 580, 5, 116, 0, 0, 580, 581, 5, 99, 0, 0, 581, 582, 5, 104, 0, 0, 582, 583, 1, 0, 0, 0, 583, 584, 6, 20, 0, 0, 584, 57, 1, 0, 0, 0, 585, 587, 8, 0, 0, 0, 586, 585, 1, 0, 0, 0, 587, 588, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 590, 1, 0, 0, 0, 590, 591, 6, 21, 0, 0, 591, 59, 1, 0, 0, 0, 592, 593, 5, 47, 0, 0, 593, 594, 5, 47, 0, 0, 594, 598, 1, 0, 0, 0, 595, 597, 8, 1, 0, 0, 596, 595, 1, 0, 0, 0, 597, 600, 1, 0, 0, 0, 598, 596, 1, 0, 0, 0, 598, 599, 1, 0, 0, 0, 599, 602, 1, 0, 0, 0, 600, 598, 1, 0, 0, 0, 601, 603, 5, 13, 0, 0, 602, 601, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 605, 1, 0, 0, 0, 604, 606, 5, 10, 0, 0, 605, 604, 1, 0, 0, 0, 605, 606, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 6, 22, 11, 0, 608, 61, 1, 0, 0, 0, 609, 610, 5, 47, 0, 0, 610, 611, 5, 42, 0, 0, 611, 616, 1, 0, 0, 
0, 612, 615, 3, 62, 23, 0, 613, 615, 9, 0, 0, 0, 614, 612, 1, 0, 0, 0, 614, 613, 1, 0, 0, 0, 615, 618, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 616, 614, 1, 0, 0, 0, 617, 619, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 619, 620, 5, 42, 0, 0, 620, 621, 5, 47, 0, 0, 621, 622, 1, 0, 0, 0, 622, 623, 6, 23, 11, 0, 623, 63, 1, 0, 0, 0, 624, 626, 7, 2, 0, 0, 625, 624, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 1, 0, 0, 0, 629, 630, 6, 24, 11, 0, 630, 65, 1, 0, 0, 0, 631, 635, 8, 3, 0, 0, 632, 633, 5, 47, 0, 0, 633, 635, 8, 4, 0, 0, 634, 631, 1, 0, 0, 0, 634, 632, 1, 0, 0, 0, 635, 67, 1, 0, 0, 0, 636, 638, 3, 66, 25, 0, 637, 636, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 637, 1, 0, 0, 0, 639, 640, 1, 0, 0, 0, 640, 69, 1, 0, 0, 0, 641, 642, 3, 182, 83, 0, 642, 643, 1, 0, 0, 0, 643, 644, 6, 27, 12, 0, 644, 645, 6, 27, 13, 0, 645, 71, 1, 0, 0, 0, 646, 647, 3, 80, 32, 0, 647, 648, 1, 0, 0, 0, 648, 649, 6, 28, 14, 0, 649, 650, 6, 28, 15, 0, 650, 73, 1, 0, 0, 0, 651, 652, 3, 64, 24, 0, 652, 653, 1, 0, 0, 0, 653, 654, 6, 29, 11, 0, 654, 75, 1, 0, 0, 0, 655, 656, 3, 60, 22, 0, 656, 657, 1, 0, 0, 0, 657, 658, 6, 30, 11, 0, 658, 77, 1, 0, 0, 0, 659, 660, 3, 62, 23, 0, 660, 661, 1, 0, 0, 0, 661, 662, 6, 31, 11, 0, 662, 79, 1, 0, 0, 0, 663, 664, 5, 124, 0, 0, 664, 665, 1, 0, 0, 0, 665, 666, 6, 32, 15, 0, 666, 81, 1, 0, 0, 0, 667, 668, 7, 5, 0, 0, 668, 83, 1, 0, 0, 0, 669, 670, 7, 6, 0, 0, 670, 85, 1, 0, 0, 0, 671, 672, 5, 92, 0, 0, 672, 673, 7, 7, 0, 0, 673, 87, 1, 0, 0, 0, 674, 675, 8, 8, 0, 0, 675, 89, 1, 0, 0, 0, 676, 678, 7, 9, 0, 0, 677, 679, 7, 10, 0, 0, 678, 677, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 681, 1, 0, 0, 0, 680, 682, 3, 82, 33, 0, 681, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 681, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 684, 91, 1, 0, 0, 0, 685, 686, 5, 64, 0, 0, 686, 93, 1, 0, 0, 0, 687, 688, 5, 96, 0, 0, 688, 95, 1, 0, 0, 0, 689, 693, 8, 11, 0, 0, 690, 691, 5, 96, 0, 0, 691, 693, 5, 96, 0, 0, 692, 689, 1, 0, 0, 0, 692, 690, 1, 0, 0, 0, 693, 97, 1, 0, 0, 0, 694, 695, 5, 95, 0, 0, 695, 99, 1, 0, 0, 0, 696, 700, 3, 84, 34, 0, 697, 700, 3, 82, 33, 0, 698, 700, 3, 98, 41, 0, 699, 696, 1, 0, 0, 0, 699, 697, 1, 0, 0, 0, 699, 698, 1, 0, 0, 0, 700, 101, 1, 0, 0, 0, 701, 706, 5, 34, 0, 0, 702, 705, 3, 86, 35, 0, 703, 705, 3, 88, 36, 0, 704, 702, 1, 0, 0, 0, 704, 703, 1, 0, 0, 0, 705, 708, 1, 0, 0, 0, 706, 704, 1, 0, 0, 0, 706, 707, 1, 0, 0, 0, 707, 709, 1, 0, 0, 0, 708, 706, 1, 0, 0, 0, 709, 731, 5, 34, 0, 0, 710, 711, 5, 34, 0, 0, 711, 712, 5, 34, 0, 0, 712, 713, 5, 34, 0, 0, 713, 717, 1, 0, 0, 0, 714, 716, 8, 1, 0, 0, 715, 714, 1, 0, 0, 0, 716, 719, 1, 0, 0, 0, 717, 718, 1, 0, 0, 0, 717, 715, 1, 0, 0, 0, 718, 720, 1, 0, 0, 0, 719, 717, 1, 0, 0, 0, 720, 721, 5, 34, 0, 0, 721, 722, 5, 34, 0, 0, 722, 723, 5, 34, 0, 0, 723, 725, 1, 0, 0, 0, 724, 726, 5, 34, 0, 0, 725, 724, 1, 0, 0, 0, 725, 726, 1, 0, 0, 0, 726, 728, 1, 0, 0, 0, 727, 729, 5, 34, 0, 0, 728, 727, 1, 0, 0, 0, 728, 729, 1, 0, 0, 0, 729, 731, 1, 0, 0, 0, 730, 701, 1, 0, 0, 0, 730, 710, 1, 0, 0, 0, 731, 103, 1, 0, 0, 0, 732, 734, 3, 82, 33, 0, 733, 732, 1, 0, 0, 0, 734, 735, 1, 0, 0, 0, 735, 733, 1, 0, 0, 0, 735, 736, 1, 0, 0, 0, 736, 105, 1, 0, 0, 0, 737, 739, 3, 82, 33, 0, 738, 737, 1, 0, 0, 0, 739, 740, 1, 0, 0, 0, 740, 738, 1, 0, 0, 0, 740, 741, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 746, 3, 122, 53, 0, 743, 745, 3, 82, 33, 0, 744, 743, 1, 0, 0, 0, 745, 748, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 746, 747, 1, 0, 0, 0, 747, 780, 1, 0, 0, 0, 748, 746, 1, 0, 0, 0, 749, 751, 3, 122, 53, 0, 750, 752, 3, 82, 33, 0, 751, 750, 
1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 780, 1, 0, 0, 0, 755, 757, 3, 82, 33, 0, 756, 755, 1, 0, 0, 0, 757, 758, 1, 0, 0, 0, 758, 756, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 767, 1, 0, 0, 0, 760, 764, 3, 122, 53, 0, 761, 763, 3, 82, 33, 0, 762, 761, 1, 0, 0, 0, 763, 766, 1, 0, 0, 0, 764, 762, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 768, 1, 0, 0, 0, 766, 764, 1, 0, 0, 0, 767, 760, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 769, 1, 0, 0, 0, 769, 770, 3, 90, 37, 0, 770, 780, 1, 0, 0, 0, 771, 773, 3, 122, 53, 0, 772, 774, 3, 82, 33, 0, 773, 772, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 773, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 777, 1, 0, 0, 0, 777, 778, 3, 90, 37, 0, 778, 780, 1, 0, 0, 0, 779, 738, 1, 0, 0, 0, 779, 749, 1, 0, 0, 0, 779, 756, 1, 0, 0, 0, 779, 771, 1, 0, 0, 0, 780, 107, 1, 0, 0, 0, 781, 782, 5, 98, 0, 0, 782, 783, 5, 121, 0, 0, 783, 109, 1, 0, 0, 0, 784, 785, 5, 97, 0, 0, 785, 786, 5, 110, 0, 0, 786, 787, 5, 100, 0, 0, 787, 111, 1, 0, 0, 0, 788, 789, 5, 97, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 99, 0, 0, 791, 113, 1, 0, 0, 0, 792, 793, 5, 61, 0, 0, 793, 115, 1, 0, 0, 0, 794, 795, 5, 58, 0, 0, 795, 796, 5, 58, 0, 0, 796, 117, 1, 0, 0, 0, 797, 798, 5, 44, 0, 0, 798, 119, 1, 0, 0, 0, 799, 800, 5, 100, 0, 0, 800, 801, 5, 101, 0, 0, 801, 802, 5, 115, 0, 0, 802, 803, 5, 99, 0, 0, 803, 121, 1, 0, 0, 0, 804, 805, 5, 46, 0, 0, 805, 123, 1, 0, 0, 0, 806, 807, 5, 102, 0, 0, 807, 808, 5, 97, 0, 0, 808, 809, 5, 108, 0, 0, 809, 810, 5, 115, 0, 0, 810, 811, 5, 101, 0, 0, 811, 125, 1, 0, 0, 0, 812, 813, 5, 102, 0, 0, 813, 814, 5, 105, 0, 0, 814, 815, 5, 114, 0, 0, 815, 816, 5, 115, 0, 0, 816, 817, 5, 116, 0, 0, 817, 127, 1, 0, 0, 0, 818, 819, 5, 105, 0, 0, 819, 820, 5, 110, 0, 0, 820, 129, 1, 0, 0, 0, 821, 822, 5, 105, 0, 0, 822, 823, 5, 115, 0, 0, 823, 131, 1, 0, 0, 0, 824, 825, 5, 108, 0, 0, 825, 826, 5, 97, 0, 0, 826, 827, 5, 115, 0, 0, 827, 828, 5, 116, 0, 0, 828, 133, 1, 0, 0, 0, 829, 830, 5, 108, 0, 0, 830, 831, 5, 105, 0, 0, 831, 832, 5, 107, 0, 0, 832, 833, 5, 101, 0, 0, 833, 135, 1, 0, 0, 0, 834, 835, 5, 40, 0, 0, 835, 137, 1, 0, 0, 0, 836, 837, 5, 109, 0, 0, 837, 838, 5, 97, 0, 0, 838, 839, 5, 116, 0, 0, 839, 840, 5, 99, 0, 0, 840, 841, 5, 104, 0, 0, 841, 139, 1, 0, 0, 0, 842, 843, 5, 110, 0, 0, 843, 844, 5, 111, 0, 0, 844, 845, 5, 116, 0, 0, 845, 141, 1, 0, 0, 0, 846, 847, 5, 110, 0, 0, 847, 848, 5, 117, 0, 0, 848, 849, 5, 108, 0, 0, 849, 850, 5, 108, 0, 0, 850, 143, 1, 0, 0, 0, 851, 852, 5, 110, 0, 0, 852, 853, 5, 117, 0, 0, 853, 854, 5, 108, 0, 0, 854, 855, 5, 108, 0, 0, 855, 856, 5, 115, 0, 0, 856, 145, 1, 0, 0, 0, 857, 858, 5, 111, 0, 0, 858, 859, 5, 114, 0, 0, 859, 147, 1, 0, 0, 0, 860, 861, 5, 63, 0, 0, 861, 149, 1, 0, 0, 0, 862, 863, 5, 114, 0, 0, 863, 864, 5, 108, 0, 0, 864, 865, 5, 105, 0, 0, 865, 866, 5, 107, 0, 0, 866, 867, 5, 101, 0, 0, 867, 151, 1, 0, 0, 0, 868, 869, 5, 41, 0, 0, 869, 153, 1, 0, 0, 0, 870, 871, 5, 116, 0, 0, 871, 872, 5, 114, 0, 0, 872, 873, 5, 117, 0, 0, 873, 874, 5, 101, 0, 0, 874, 155, 1, 0, 0, 0, 875, 876, 5, 61, 0, 0, 876, 877, 5, 61, 0, 0, 877, 157, 1, 0, 0, 0, 878, 879, 5, 61, 0, 0, 879, 880, 5, 126, 0, 0, 880, 159, 1, 0, 0, 0, 881, 882, 5, 33, 0, 0, 882, 883, 5, 61, 0, 0, 883, 161, 1, 0, 0, 0, 884, 885, 5, 60, 0, 0, 885, 163, 1, 0, 0, 0, 886, 887, 5, 60, 0, 0, 887, 888, 5, 61, 0, 0, 888, 165, 1, 0, 0, 0, 889, 890, 5, 62, 0, 0, 890, 167, 1, 0, 0, 0, 891, 892, 5, 62, 0, 0, 892, 893, 5, 61, 0, 0, 893, 169, 1, 0, 0, 0, 894, 895, 5, 43, 0, 0, 895, 171, 1, 0, 0, 0, 896, 897, 5, 45, 0, 0, 897, 173, 1, 0, 0, 0, 
898, 899, 5, 42, 0, 0, 899, 175, 1, 0, 0, 0, 900, 901, 5, 47, 0, 0, 901, 177, 1, 0, 0, 0, 902, 903, 5, 37, 0, 0, 903, 179, 1, 0, 0, 0, 904, 907, 3, 148, 66, 0, 905, 908, 3, 84, 34, 0, 906, 908, 3, 98, 41, 0, 907, 905, 1, 0, 0, 0, 907, 906, 1, 0, 0, 0, 908, 912, 1, 0, 0, 0, 909, 911, 3, 100, 42, 0, 910, 909, 1, 0, 0, 0, 911, 914, 1, 0, 0, 0, 912, 910, 1, 0, 0, 0, 912, 913, 1, 0, 0, 0, 913, 922, 1, 0, 0, 0, 914, 912, 1, 0, 0, 0, 915, 917, 3, 148, 66, 0, 916, 918, 3, 82, 33, 0, 917, 916, 1, 0, 0, 0, 918, 919, 1, 0, 0, 0, 919, 917, 1, 0, 0, 0, 919, 920, 1, 0, 0, 0, 920, 922, 1, 0, 0, 0, 921, 904, 1, 0, 0, 0, 921, 915, 1, 0, 0, 0, 922, 181, 1, 0, 0, 0, 923, 924, 5, 91, 0, 0, 924, 925, 1, 0, 0, 0, 925, 926, 6, 83, 0, 0, 926, 927, 6, 83, 0, 0, 927, 183, 1, 0, 0, 0, 928, 929, 5, 93, 0, 0, 929, 930, 1, 0, 0, 0, 930, 931, 6, 84, 15, 0, 931, 932, 6, 84, 15, 0, 932, 185, 1, 0, 0, 0, 933, 937, 3, 84, 34, 0, 934, 936, 3, 100, 42, 0, 935, 934, 1, 0, 0, 0, 936, 939, 1, 0, 0, 0, 937, 935, 1, 0, 0, 0, 937, 938, 1, 0, 0, 0, 938, 950, 1, 0, 0, 0, 939, 937, 1, 0, 0, 0, 940, 943, 3, 98, 41, 0, 941, 943, 3, 92, 38, 0, 942, 940, 1, 0, 0, 0, 942, 941, 1, 0, 0, 0, 943, 945, 1, 0, 0, 0, 944, 946, 3, 100, 42, 0, 945, 944, 1, 0, 0, 0, 946, 947, 1, 0, 0, 0, 947, 945, 1, 0, 0, 0, 947, 948, 1, 0, 0, 0, 948, 950, 1, 0, 0, 0, 949, 933, 1, 0, 0, 0, 949, 942, 1, 0, 0, 0, 950, 187, 1, 0, 0, 0, 951, 953, 3, 94, 39, 0, 952, 954, 3, 96, 40, 0, 953, 952, 1, 0, 0, 0, 954, 955, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 956, 1, 0, 0, 0, 956, 957, 1, 0, 0, 0, 957, 958, 3, 94, 39, 0, 958, 189, 1, 0, 0, 0, 959, 960, 3, 188, 86, 0, 960, 191, 1, 0, 0, 0, 961, 962, 3, 60, 22, 0, 962, 963, 1, 0, 0, 0, 963, 964, 6, 88, 11, 0, 964, 193, 1, 0, 0, 0, 965, 966, 3, 62, 23, 0, 966, 967, 1, 0, 0, 0, 967, 968, 6, 89, 11, 0, 968, 195, 1, 0, 0, 0, 969, 970, 3, 64, 24, 0, 970, 971, 1, 0, 0, 0, 971, 972, 6, 90, 11, 0, 972, 197, 1, 0, 0, 0, 973, 974, 3, 80, 32, 0, 974, 975, 1, 0, 0, 0, 975, 976, 6, 91, 14, 0, 976, 977, 6, 91, 15, 0, 977, 199, 1, 0, 0, 0, 978, 979, 3, 182, 83, 0, 979, 980, 1, 0, 0, 0, 980, 981, 6, 92, 12, 0, 981, 201, 1, 0, 0, 0, 982, 983, 3, 184, 84, 0, 983, 984, 1, 0, 0, 0, 984, 985, 6, 93, 16, 0, 985, 203, 1, 0, 0, 0, 986, 987, 3, 368, 176, 0, 987, 988, 1, 0, 0, 0, 988, 989, 6, 94, 17, 0, 989, 205, 1, 0, 0, 0, 990, 991, 3, 118, 51, 0, 991, 992, 1, 0, 0, 0, 992, 993, 6, 95, 18, 0, 993, 207, 1, 0, 0, 0, 994, 995, 3, 114, 49, 0, 995, 996, 1, 0, 0, 0, 996, 997, 6, 96, 19, 0, 997, 209, 1, 0, 0, 0, 998, 999, 5, 109, 0, 0, 999, 1000, 5, 101, 0, 0, 1000, 1001, 5, 116, 0, 0, 1001, 1002, 5, 97, 0, 0, 1002, 1003, 5, 100, 0, 0, 1003, 1004, 5, 97, 0, 0, 1004, 1005, 5, 116, 0, 0, 1005, 1006, 5, 97, 0, 0, 1006, 211, 1, 0, 0, 0, 1007, 1008, 3, 68, 26, 0, 1008, 1009, 1, 0, 0, 0, 1009, 1010, 6, 98, 20, 0, 1010, 213, 1, 0, 0, 0, 1011, 1012, 3, 102, 43, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 6, 99, 21, 0, 1014, 215, 1, 0, 0, 0, 1015, 1016, 3, 60, 22, 0, 1016, 1017, 1, 0, 0, 0, 1017, 1018, 6, 100, 11, 0, 1018, 217, 1, 0, 0, 0, 1019, 1020, 3, 62, 23, 0, 1020, 1021, 1, 0, 0, 0, 1021, 1022, 6, 101, 11, 0, 1022, 219, 1, 0, 0, 0, 1023, 1024, 3, 64, 24, 0, 1024, 1025, 1, 0, 0, 0, 1025, 1026, 6, 102, 11, 0, 1026, 221, 1, 0, 0, 0, 1027, 1028, 3, 80, 32, 0, 1028, 1029, 1, 0, 0, 0, 1029, 1030, 6, 103, 14, 0, 1030, 1031, 6, 103, 15, 0, 1031, 223, 1, 0, 0, 0, 1032, 1033, 3, 122, 53, 0, 1033, 1034, 1, 0, 0, 0, 1034, 1035, 6, 104, 22, 0, 1035, 225, 1, 0, 0, 0, 1036, 1037, 3, 118, 51, 0, 1037, 1038, 1, 0, 0, 0, 1038, 1039, 6, 105, 18, 0, 1039, 227, 1, 0, 0, 0, 1040, 1045, 
3, 84, 34, 0, 1041, 1045, 3, 82, 33, 0, 1042, 1045, 3, 98, 41, 0, 1043, 1045, 3, 174, 79, 0, 1044, 1040, 1, 0, 0, 0, 1044, 1041, 1, 0, 0, 0, 1044, 1042, 1, 0, 0, 0, 1044, 1043, 1, 0, 0, 0, 1045, 229, 1, 0, 0, 0, 1046, 1049, 3, 84, 34, 0, 1047, 1049, 3, 174, 79, 0, 1048, 1046, 1, 0, 0, 0, 1048, 1047, 1, 0, 0, 0, 1049, 1053, 1, 0, 0, 0, 1050, 1052, 3, 228, 106, 0, 1051, 1050, 1, 0, 0, 0, 1052, 1055, 1, 0, 0, 0, 1053, 1051, 1, 0, 0, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1066, 1, 0, 0, 0, 1055, 1053, 1, 0, 0, 0, 1056, 1059, 3, 98, 41, 0, 1057, 1059, 3, 92, 38, 0, 1058, 1056, 1, 0, 0, 0, 1058, 1057, 1, 0, 0, 0, 1059, 1061, 1, 0, 0, 0, 1060, 1062, 3, 228, 106, 0, 1061, 1060, 1, 0, 0, 0, 1062, 1063, 1, 0, 0, 0, 1063, 1061, 1, 0, 0, 0, 1063, 1064, 1, 0, 0, 0, 1064, 1066, 1, 0, 0, 0, 1065, 1048, 1, 0, 0, 0, 1065, 1058, 1, 0, 0, 0, 1066, 231, 1, 0, 0, 0, 1067, 1070, 3, 230, 107, 0, 1068, 1070, 3, 188, 86, 0, 1069, 1067, 1, 0, 0, 0, 1069, 1068, 1, 0, 0, 0, 1070, 1071, 1, 0, 0, 0, 1071, 1069, 1, 0, 0, 0, 1071, 1072, 1, 0, 0, 0, 1072, 233, 1, 0, 0, 0, 1073, 1074, 3, 60, 22, 0, 1074, 1075, 1, 0, 0, 0, 1075, 1076, 6, 109, 11, 0, 1076, 235, 1, 0, 0, 0, 1077, 1078, 3, 62, 23, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1080, 6, 110, 11, 0, 1080, 237, 1, 0, 0, 0, 1081, 1082, 3, 64, 24, 0, 1082, 1083, 1, 0, 0, 0, 1083, 1084, 6, 111, 11, 0, 1084, 239, 1, 0, 0, 0, 1085, 1086, 3, 80, 32, 0, 1086, 1087, 1, 0, 0, 0, 1087, 1088, 6, 112, 14, 0, 1088, 1089, 6, 112, 15, 0, 1089, 241, 1, 0, 0, 0, 1090, 1091, 3, 114, 49, 0, 1091, 1092, 1, 0, 0, 0, 1092, 1093, 6, 113, 19, 0, 1093, 243, 1, 0, 0, 0, 1094, 1095, 3, 118, 51, 0, 1095, 1096, 1, 0, 0, 0, 1096, 1097, 6, 114, 18, 0, 1097, 245, 1, 0, 0, 0, 1098, 1099, 3, 122, 53, 0, 1099, 1100, 1, 0, 0, 0, 1100, 1101, 6, 115, 22, 0, 1101, 247, 1, 0, 0, 0, 1102, 1103, 5, 97, 0, 0, 1103, 1104, 5, 115, 0, 0, 1104, 249, 1, 0, 0, 0, 1105, 1106, 3, 232, 108, 0, 1106, 1107, 1, 0, 0, 0, 1107, 1108, 6, 117, 23, 0, 1108, 251, 1, 0, 0, 0, 1109, 1110, 3, 60, 22, 0, 1110, 1111, 1, 0, 0, 0, 1111, 1112, 6, 118, 11, 0, 1112, 253, 1, 0, 0, 0, 1113, 1114, 3, 62, 23, 0, 1114, 1115, 1, 0, 0, 0, 1115, 1116, 6, 119, 11, 0, 1116, 255, 1, 0, 0, 0, 1117, 1118, 3, 64, 24, 0, 1118, 1119, 1, 0, 0, 0, 1119, 1120, 6, 120, 11, 0, 1120, 257, 1, 0, 0, 0, 1121, 1122, 3, 80, 32, 0, 1122, 1123, 1, 0, 0, 0, 1123, 1124, 6, 121, 14, 0, 1124, 1125, 6, 121, 15, 0, 1125, 259, 1, 0, 0, 0, 1126, 1127, 3, 182, 83, 0, 1127, 1128, 1, 0, 0, 0, 1128, 1129, 6, 122, 12, 0, 1129, 1130, 6, 122, 24, 0, 1130, 261, 1, 0, 0, 0, 1131, 1132, 5, 111, 0, 0, 1132, 1133, 5, 110, 0, 0, 1133, 1134, 1, 0, 0, 0, 1134, 1135, 6, 123, 25, 0, 1135, 263, 1, 0, 0, 0, 1136, 1137, 5, 119, 0, 0, 1137, 1138, 5, 105, 0, 0, 1138, 1139, 5, 116, 0, 0, 1139, 1140, 5, 104, 0, 0, 1140, 1141, 1, 0, 0, 0, 1141, 1142, 6, 124, 25, 0, 1142, 265, 1, 0, 0, 0, 1143, 1144, 8, 12, 0, 0, 1144, 267, 1, 0, 0, 0, 1145, 1147, 3, 266, 125, 0, 1146, 1145, 1, 0, 0, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1146, 1, 0, 0, 0, 1148, 1149, 1, 0, 0, 0, 1149, 1150, 1, 0, 0, 0, 1150, 1151, 3, 368, 176, 0, 1151, 1153, 1, 0, 0, 0, 1152, 1146, 1, 0, 0, 0, 1152, 1153, 1, 0, 0, 0, 1153, 1155, 1, 0, 0, 0, 1154, 1156, 3, 266, 125, 0, 1155, 1154, 1, 0, 0, 0, 1156, 1157, 1, 0, 0, 0, 1157, 1155, 1, 0, 0, 0, 1157, 1158, 1, 0, 0, 0, 1158, 269, 1, 0, 0, 0, 1159, 1160, 3, 268, 126, 0, 1160, 1161, 1, 0, 0, 0, 1161, 1162, 6, 127, 26, 0, 1162, 271, 1, 0, 0, 0, 1163, 1164, 3, 60, 22, 0, 1164, 1165, 1, 0, 0, 0, 1165, 1166, 6, 128, 11, 0, 1166, 273, 1, 0, 0, 0, 1167, 1168, 3, 62, 23, 0, 1168, 1169, 1, 0, 0, 0, 1169, 1170, 6, 129, 
11, 0, 1170, 275, 1, 0, 0, 0, 1171, 1172, 3, 64, 24, 0, 1172, 1173, 1, 0, 0, 0, 1173, 1174, 6, 130, 11, 0, 1174, 277, 1, 0, 0, 0, 1175, 1176, 3, 80, 32, 0, 1176, 1177, 1, 0, 0, 0, 1177, 1178, 6, 131, 14, 0, 1178, 1179, 6, 131, 15, 0, 1179, 1180, 6, 131, 15, 0, 1180, 279, 1, 0, 0, 0, 1181, 1182, 3, 114, 49, 0, 1182, 1183, 1, 0, 0, 0, 1183, 1184, 6, 132, 19, 0, 1184, 281, 1, 0, 0, 0, 1185, 1186, 3, 118, 51, 0, 1186, 1187, 1, 0, 0, 0, 1187, 1188, 6, 133, 18, 0, 1188, 283, 1, 0, 0, 0, 1189, 1190, 3, 122, 53, 0, 1190, 1191, 1, 0, 0, 0, 1191, 1192, 6, 134, 22, 0, 1192, 285, 1, 0, 0, 0, 1193, 1194, 3, 264, 124, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1196, 6, 135, 27, 0, 1196, 287, 1, 0, 0, 0, 1197, 1198, 3, 232, 108, 0, 1198, 1199, 1, 0, 0, 0, 1199, 1200, 6, 136, 23, 0, 1200, 289, 1, 0, 0, 0, 1201, 1202, 3, 190, 87, 0, 1202, 1203, 1, 0, 0, 0, 1203, 1204, 6, 137, 28, 0, 1204, 291, 1, 0, 0, 0, 1205, 1206, 3, 60, 22, 0, 1206, 1207, 1, 0, 0, 0, 1207, 1208, 6, 138, 11, 0, 1208, 293, 1, 0, 0, 0, 1209, 1210, 3, 62, 23, 0, 1210, 1211, 1, 0, 0, 0, 1211, 1212, 6, 139, 11, 0, 1212, 295, 1, 0, 0, 0, 1213, 1214, 3, 64, 24, 0, 1214, 1215, 1, 0, 0, 0, 1215, 1216, 6, 140, 11, 0, 1216, 297, 1, 0, 0, 0, 1217, 1218, 3, 80, 32, 0, 1218, 1219, 1, 0, 0, 0, 1219, 1220, 6, 141, 14, 0, 1220, 1221, 6, 141, 15, 0, 1221, 299, 1, 0, 0, 0, 1222, 1223, 3, 368, 176, 0, 1223, 1224, 1, 0, 0, 0, 1224, 1225, 6, 142, 17, 0, 1225, 301, 1, 0, 0, 0, 1226, 1227, 3, 118, 51, 0, 1227, 1228, 1, 0, 0, 0, 1228, 1229, 6, 143, 18, 0, 1229, 303, 1, 0, 0, 0, 1230, 1231, 3, 122, 53, 0, 1231, 1232, 1, 0, 0, 0, 1232, 1233, 6, 144, 22, 0, 1233, 305, 1, 0, 0, 0, 1234, 1235, 3, 262, 123, 0, 1235, 1236, 1, 0, 0, 0, 1236, 1237, 6, 145, 29, 0, 1237, 1238, 6, 145, 30, 0, 1238, 307, 1, 0, 0, 0, 1239, 1240, 3, 68, 26, 0, 1240, 1241, 1, 0, 0, 0, 1241, 1242, 6, 146, 20, 0, 1242, 309, 1, 0, 0, 0, 1243, 1244, 3, 102, 43, 0, 1244, 1245, 1, 0, 0, 0, 1245, 1246, 6, 147, 21, 0, 1246, 311, 1, 0, 0, 0, 1247, 1248, 3, 60, 22, 0, 1248, 1249, 1, 0, 0, 0, 1249, 1250, 6, 148, 11, 0, 1250, 313, 1, 0, 0, 0, 1251, 1252, 3, 62, 23, 0, 1252, 1253, 1, 0, 0, 0, 1253, 1254, 6, 149, 11, 0, 1254, 315, 1, 0, 0, 0, 1255, 1256, 3, 64, 24, 0, 1256, 1257, 1, 0, 0, 0, 1257, 1258, 6, 150, 11, 0, 1258, 317, 1, 0, 0, 0, 1259, 1260, 3, 80, 32, 0, 1260, 1261, 1, 0, 0, 0, 1261, 1262, 6, 151, 14, 0, 1262, 1263, 6, 151, 15, 0, 1263, 1264, 6, 151, 15, 0, 1264, 319, 1, 0, 0, 0, 1265, 1266, 3, 118, 51, 0, 1266, 1267, 1, 0, 0, 0, 1267, 1268, 6, 152, 18, 0, 1268, 321, 1, 0, 0, 0, 1269, 1270, 3, 122, 53, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1272, 6, 153, 22, 0, 1272, 323, 1, 0, 0, 0, 1273, 1274, 3, 232, 108, 0, 1274, 1275, 1, 0, 0, 0, 1275, 1276, 6, 154, 23, 0, 1276, 325, 1, 0, 0, 0, 1277, 1278, 3, 60, 22, 0, 1278, 1279, 1, 0, 0, 0, 1279, 1280, 6, 155, 11, 0, 1280, 327, 1, 0, 0, 0, 1281, 1282, 3, 62, 23, 0, 1282, 1283, 1, 0, 0, 0, 1283, 1284, 6, 156, 11, 0, 1284, 329, 1, 0, 0, 0, 1285, 1286, 3, 64, 24, 0, 1286, 1287, 1, 0, 0, 0, 1287, 1288, 6, 157, 11, 0, 1288, 331, 1, 0, 0, 0, 1289, 1290, 3, 80, 32, 0, 1290, 1291, 1, 0, 0, 0, 1291, 1292, 6, 158, 14, 0, 1292, 1293, 6, 158, 15, 0, 1293, 333, 1, 0, 0, 0, 1294, 1295, 3, 122, 53, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1297, 6, 159, 22, 0, 1297, 335, 1, 0, 0, 0, 1298, 1299, 3, 190, 87, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 6, 160, 28, 0, 1301, 337, 1, 0, 0, 0, 1302, 1303, 3, 186, 85, 0, 1303, 1304, 1, 0, 0, 0, 1304, 1305, 6, 161, 31, 0, 1305, 339, 1, 0, 0, 0, 1306, 1307, 3, 60, 22, 0, 1307, 1308, 1, 0, 0, 0, 1308, 1309, 6, 162, 11, 0, 1309, 341, 1, 0, 0, 0, 1310, 1311, 
3, 62, 23, 0, 1311, 1312, 1, 0, 0, 0, 1312, 1313, 6, 163, 11, 0, 1313, 343, 1, 0, 0, 0, 1314, 1315, 3, 64, 24, 0, 1315, 1316, 1, 0, 0, 0, 1316, 1317, 6, 164, 11, 0, 1317, 345, 1, 0, 0, 0, 1318, 1319, 3, 80, 32, 0, 1319, 1320, 1, 0, 0, 0, 1320, 1321, 6, 165, 14, 0, 1321, 1322, 6, 165, 15, 0, 1322, 347, 1, 0, 0, 0, 1323, 1324, 5, 105, 0, 0, 1324, 1325, 5, 110, 0, 0, 1325, 1326, 5, 102, 0, 0, 1326, 1327, 5, 111, 0, 0, 1327, 349, 1, 0, 0, 0, 1328, 1329, 3, 60, 22, 0, 1329, 1330, 1, 0, 0, 0, 1330, 1331, 6, 167, 11, 0, 1331, 351, 1, 0, 0, 0, 1332, 1333, 3, 62, 23, 0, 1333, 1334, 1, 0, 0, 0, 1334, 1335, 6, 168, 11, 0, 1335, 353, 1, 0, 0, 0, 1336, 1337, 3, 64, 24, 0, 1337, 1338, 1, 0, 0, 0, 1338, 1339, 6, 169, 11, 0, 1339, 355, 1, 0, 0, 0, 1340, 1341, 3, 80, 32, 0, 1341, 1342, 1, 0, 0, 0, 1342, 1343, 6, 170, 14, 0, 1343, 1344, 6, 170, 15, 0, 1344, 357, 1, 0, 0, 0, 1345, 1346, 5, 102, 0, 0, 1346, 1347, 5, 117, 0, 0, 1347, 1348, 5, 110, 0, 0, 1348, 1349, 5, 99, 0, 0, 1349, 1350, 5, 116, 0, 0, 1350, 1351, 5, 105, 0, 0, 1351, 1352, 5, 111, 0, 0, 1352, 1353, 5, 110, 0, 0, 1353, 1354, 5, 115, 0, 0, 1354, 359, 1, 0, 0, 0, 1355, 1356, 3, 60, 22, 0, 1356, 1357, 1, 0, 0, 0, 1357, 1358, 6, 172, 11, 0, 1358, 361, 1, 0, 0, 0, 1359, 1360, 3, 62, 23, 0, 1360, 1361, 1, 0, 0, 0, 1361, 1362, 6, 173, 11, 0, 1362, 363, 1, 0, 0, 0, 1363, 1364, 3, 64, 24, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, 6, 174, 11, 0, 1366, 365, 1, 0, 0, 0, 1367, 1368, 3, 184, 84, 0, 1368, 1369, 1, 0, 0, 0, 1369, 1370, 6, 175, 16, 0, 1370, 1371, 6, 175, 15, 0, 1371, 367, 1, 0, 0, 0, 1372, 1373, 5, 58, 0, 0, 1373, 369, 1, 0, 0, 0, 1374, 1380, 3, 92, 38, 0, 1375, 1380, 3, 82, 33, 0, 1376, 1380, 3, 122, 53, 0, 1377, 1380, 3, 84, 34, 0, 1378, 1380, 3, 98, 41, 0, 1379, 1374, 1, 0, 0, 0, 1379, 1375, 1, 0, 0, 0, 1379, 1376, 1, 0, 0, 0, 1379, 1377, 1, 0, 0, 0, 1379, 1378, 1, 0, 0, 0, 1380, 1381, 1, 0, 0, 0, 1381, 1379, 1, 0, 0, 0, 1381, 1382, 1, 0, 0, 0, 1382, 371, 1, 0, 0, 0, 1383, 1384, 3, 60, 22, 0, 1384, 1385, 1, 0, 0, 0, 1385, 1386, 6, 178, 11, 0, 1386, 373, 1, 0, 0, 0, 1387, 1388, 3, 62, 23, 0, 1388, 1389, 1, 0, 0, 0, 1389, 1390, 6, 179, 11, 0, 1390, 375, 1, 0, 0, 0, 1391, 1392, 3, 64, 24, 0, 1392, 1393, 1, 0, 0, 0, 1393, 1394, 6, 180, 11, 0, 1394, 377, 1, 0, 0, 0, 1395, 1396, 3, 80, 32, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1398, 6, 181, 14, 0, 1398, 1399, 6, 181, 15, 0, 1399, 379, 1, 0, 0, 0, 1400, 1401, 3, 68, 26, 0, 1401, 1402, 1, 0, 0, 0, 1402, 1403, 6, 182, 20, 0, 1403, 1404, 6, 182, 15, 0, 1404, 1405, 6, 182, 32, 0, 1405, 381, 1, 0, 0, 0, 1406, 1407, 3, 102, 43, 0, 1407, 1408, 1, 0, 0, 0, 1408, 1409, 6, 183, 21, 0, 1409, 1410, 6, 183, 15, 0, 1410, 1411, 6, 183, 32, 0, 1411, 383, 1, 0, 0, 0, 1412, 1413, 3, 60, 22, 0, 1413, 1414, 1, 0, 0, 0, 1414, 1415, 6, 184, 11, 0, 1415, 385, 1, 0, 0, 0, 1416, 1417, 3, 62, 23, 0, 1417, 1418, 1, 0, 0, 0, 1418, 1419, 6, 185, 11, 0, 1419, 387, 1, 0, 0, 0, 1420, 1421, 3, 64, 24, 0, 1421, 1422, 1, 0, 0, 0, 1422, 1423, 6, 186, 11, 0, 1423, 389, 1, 0, 0, 0, 1424, 1425, 3, 368, 176, 0, 1425, 1426, 1, 0, 0, 0, 1426, 1427, 6, 187, 17, 0, 1427, 1428, 6, 187, 15, 0, 1428, 1429, 6, 187, 7, 0, 1429, 391, 1, 0, 0, 0, 1430, 1431, 3, 118, 51, 0, 1431, 1432, 1, 0, 0, 0, 1432, 1433, 6, 188, 18, 0, 1433, 1434, 6, 188, 15, 0, 1434, 1435, 6, 188, 7, 0, 1435, 393, 1, 0, 0, 0, 1436, 1437, 3, 60, 22, 0, 1437, 1438, 1, 0, 0, 0, 1438, 1439, 6, 189, 11, 0, 1439, 395, 1, 0, 0, 0, 1440, 1441, 3, 62, 23, 0, 1441, 1442, 1, 0, 0, 0, 1442, 1443, 6, 190, 11, 0, 1443, 397, 1, 0, 0, 0, 1444, 1445, 3, 64, 24, 0, 1445, 1446, 1, 0, 0, 0, 1446, 1447, 
6, 191, 11, 0, 1447, 399, 1, 0, 0, 0, 1448, 1449, 3, 190, 87, 0, 1449, 1450, 1, 0, 0, 0, 1450, 1451, 6, 192, 15, 0, 1451, 1452, 6, 192, 0, 0, 1452, 1453, 6, 192, 28, 0, 1453, 401, 1, 0, 0, 0, 1454, 1455, 3, 186, 85, 0, 1455, 1456, 1, 0, 0, 0, 1456, 1457, 6, 193, 15, 0, 1457, 1458, 6, 193, 0, 0, 1458, 1459, 6, 193, 31, 0, 1459, 403, 1, 0, 0, 0, 1460, 1461, 3, 108, 46, 0, 1461, 1462, 1, 0, 0, 0, 1462, 1463, 6, 194, 15, 0, 1463, 1464, 6, 194, 0, 0, 1464, 1465, 6, 194, 33, 0, 1465, 405, 1, 0, 0, 0, 1466, 1467, 3, 80, 32, 0, 1467, 1468, 1, 0, 0, 0, 1468, 1469, 6, 195, 14, 0, 1469, 1470, 6, 195, 15, 0, 1470, 407, 1, 0, 0, 0, 66, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 588, 598, 602, 605, 614, 616, 627, 634, 639, 678, 683, 692, 699, 704, 706, 717, 725, 728, 730, 735, 740, 746, 753, 758, 764, 767, 775, 779, 907, 912, 919, 921, 937, 942, 947, 949, 955, 1044, 1048, 1053, 1058, 1063, 1065, 1069, 1071, 1148, 1152, 1157, 1379, 1381, 34, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 71, 0, 5, 0, 0, 7, 30, 0, 4, 0, 0, 7, 72, 0, 7, 116, 0, 7, 39, 0, 7, 37, 0, 7, 26, 0, 7, 31, 0, 7, 41, 0, 7, 82, 0, 5, 13, 0, 5, 7, 0, 7, 92, 0, 7, 91, 0, 7, 74, 0, 7, 90, 0, 5, 9, 0, 7, 73, 0, 5, 15, 0, 7, 34, 0] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java index 831be58254d6e..98760b8595c32 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java @@ -214,7 +214,7 @@ public EsqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000~\u05bc\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ + "\u0004\u0000~\u05bf\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ @@ -335,349 +335,349 @@ public EsqlBaseLexer(CharStream input) { "F\u0001F\u0001G\u0001G\u0001G\u0001H\u0001H\u0001H\u0001I\u0001I\u0001"+ "J\u0001J\u0001J\u0001K\u0001K\u0001L\u0001L\u0001L\u0001M\u0001M\u0001"+ "N\u0001N\u0001O\u0001O\u0001P\u0001P\u0001Q\u0001Q\u0001R\u0001R\u0001"+ - "R\u0005R\u038c\bR\nR\fR\u038f\tR\u0001R\u0001R\u0004R\u0393\bR\u000bR"+ - "\fR\u0394\u0003R\u0397\bR\u0001S\u0001S\u0001S\u0001S\u0001S\u0001T\u0001"+ - "T\u0001T\u0001T\u0001T\u0001U\u0001U\u0005U\u03a5\bU\nU\fU\u03a8\tU\u0001"+ - "U\u0001U\u0003U\u03ac\bU\u0001U\u0004U\u03af\bU\u000bU\fU\u03b0\u0003"+ - "U\u03b3\bU\u0001V\u0001V\u0004V\u03b7\bV\u000bV\fV\u03b8\u0001V\u0001"+ - "V\u0001W\u0001W\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001Y\u0001Y\u0001"+ - "Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001[\u0001[\u0001[\u0001[\u0001"+ - "\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001]\u0001]\u0001^\u0001^\u0001"+ - "^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001"+ - "a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001b\u0001"+ - "b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001c\u0001d\u0001d\u0001d\u0001"+ - "d\u0001e\u0001e\u0001e\u0001e\u0001f\u0001f\u0001f\u0001f\u0001g\u0001"+ - "g\u0001g\u0001g\u0001g\u0001h\u0001h\u0001h\u0001h\u0001i\u0001i\u0001"+ - 
"i\u0001i\u0001j\u0001j\u0001j\u0001j\u0003j\u0412\bj\u0001k\u0001k\u0003"+ - "k\u0416\bk\u0001k\u0005k\u0419\bk\nk\fk\u041c\tk\u0001k\u0001k\u0003k"+ - "\u0420\bk\u0001k\u0004k\u0423\bk\u000bk\fk\u0424\u0003k\u0427\bk\u0001"+ - "l\u0001l\u0004l\u042b\bl\u000bl\fl\u042c\u0001m\u0001m\u0001m\u0001m\u0001"+ - "n\u0001n\u0001n\u0001n\u0001o\u0001o\u0001o\u0001o\u0001p\u0001p\u0001"+ - "p\u0001p\u0001p\u0001q\u0001q\u0001q\u0001q\u0001r\u0001r\u0001r\u0001"+ - "r\u0001s\u0001s\u0001s\u0001s\u0001t\u0001t\u0001t\u0001u\u0001u\u0001"+ - "u\u0001u\u0001v\u0001v\u0001v\u0001v\u0001w\u0001w\u0001w\u0001w\u0001"+ - "x\u0001x\u0001x\u0001x\u0001y\u0001y\u0001y\u0001y\u0001y\u0001z\u0001"+ - "z\u0001z\u0001z\u0001z\u0001{\u0001{\u0001{\u0001{\u0001{\u0001|\u0001"+ - "|\u0001|\u0001|\u0001|\u0001|\u0001|\u0001}\u0001}\u0001~\u0004~\u0478"+ - "\b~\u000b~\f~\u0479\u0001~\u0001~\u0003~\u047e\b~\u0001~\u0004~\u0481"+ - "\b~\u000b~\f~\u0482\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u007f\u0001"+ - "\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001\u0081\u0001"+ - "\u0081\u0001\u0081\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0082\u0001"+ - "\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ - "\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0085\u0001\u0085\u0001"+ - "\u0085\u0001\u0085\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0086\u0001"+ - "\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0088\u0001\u0088\u0001"+ - "\u0088\u0001\u0088\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001"+ - "\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008b\u0001\u008b\u0001"+ - "\u008b\u0001\u008b\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001"+ - "\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008e\u0001"+ - "\u008e\u0001\u008e\u0001\u008e\u0001\u008f\u0001\u008f\u0001\u008f\u0001"+ - "\u008f\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0091\u0001"+ - "\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0092\u0001\u0092\u0001"+ - "\u0092\u0001\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0095\u0001\u0095\u0001"+ - "\u0095\u0001\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ - "\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0099\u0001\u0099\u0001"+ - "\u0099\u0001\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001"+ - "\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009c\u0001\u009c\u0001"+ - "\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d\u0001"+ - "\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009f\u0001"+ - "\u009f\u0001\u009f\u0001\u009f\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a2\u0001"+ - "\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001"+ - "\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a6\u0001\u00a6\u0001"+ - "\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001"+ - "\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001"+ - "\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001"+ - "\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - "\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - 
"\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001"+ - "\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001"+ - "\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00b0\u0001"+ - "\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0004"+ - "\u00b1\u0561\b\u00b1\u000b\u00b1\f\u00b1\u0562\u0001\u00b2\u0001\u00b2"+ - "\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5"+ - "\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b7"+ - "\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8"+ - "\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00ba"+ - "\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ - "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bc\u0001\u00bc\u0001\u00bc"+ - "\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001\u00bd\u0001\u00bd"+ - "\u0001\u00bd\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00bf"+ - "\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00c0\u0001\u00c0\u0001\u00c0"+ - "\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001\u00c1"+ - "\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c2\u0001\u00c2\u0001\u00c2"+ - "\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3\u0001\u00c3\u0001\u00c3"+ - "\u0001\u00c3\u0001\u00c3\u0002\u0268\u02cd\u0000\u00c4\u0010\u0001\u0012"+ - "\u0002\u0014\u0003\u0016\u0004\u0018\u0005\u001a\u0006\u001c\u0007\u001e"+ - "\b \t\"\n$\u000b&\f(\r*\u000e,\u000f.\u00100\u00112\u00124\u00136\u0014"+ - "8\u0015:\u0016<\u0017>\u0018@\u0019B\u0000D\u001aF\u0000H\u0000J\u001b"+ - "L\u001cN\u001dP\u001eR\u0000T\u0000V\u0000X\u0000Z\u0000\\\u0000^\u0000"+ - "`\u0000b\u0000d\u0000f\u001fh j!l\"n#p$r%t&v\'x(z)|*~+\u0080,\u0082-\u0084"+ - ".\u0086/\u00880\u008a1\u008c2\u008e3\u00904\u00925\u00946\u00967\u0098"+ - "8\u009a9\u009c:\u009e;\u00a0<\u00a2=\u00a4>\u00a6?\u00a8@\u00aaA\u00ac"+ - "B\u00aeC\u00b0D\u00b2E\u00b4F\u00b6G\u00b8H\u00baI\u00bc\u0000\u00beJ"+ - "\u00c0K\u00c2L\u00c4M\u00c6\u0000\u00c8\u0000\u00ca\u0000\u00cc\u0000"+ - "\u00ce\u0000\u00d0\u0000\u00d2N\u00d4\u0000\u00d6\u0000\u00d8O\u00daP"+ - "\u00dcQ\u00de\u0000\u00e0\u0000\u00e2\u0000\u00e4\u0000\u00e6\u0000\u00e8"+ - "R\u00eaS\u00ecT\u00eeU\u00f0\u0000\u00f2\u0000\u00f4\u0000\u00f6\u0000"+ - "\u00f8V\u00fa\u0000\u00fcW\u00feX\u0100Y\u0102\u0000\u0104\u0000\u0106"+ - "Z\u0108[\u010a\u0000\u010c\\\u010e\u0000\u0110]\u0112^\u0114_\u0116\u0000"+ - "\u0118\u0000\u011a\u0000\u011c\u0000\u011e\u0000\u0120\u0000\u0122\u0000"+ - "\u0124`\u0126a\u0128b\u012a\u0000\u012c\u0000\u012e\u0000\u0130\u0000"+ - "\u0132\u0000\u0134\u0000\u0136\u0000\u0138c\u013ad\u013ce\u013e\u0000"+ - "\u0140\u0000\u0142\u0000\u0144\u0000\u0146f\u0148g\u014ah\u014c\u0000"+ - "\u014e\u0000\u0150\u0000\u0152\u0000\u0154i\u0156j\u0158k\u015a\u0000"+ - "\u015cl\u015em\u0160n\u0162o\u0164\u0000\u0166p\u0168q\u016ar\u016cs\u016e"+ - "\u0000\u0170t\u0172u\u0174v\u0176w\u0178x\u017a\u0000\u017c\u0000\u017e"+ - "\u0000\u0180y\u0182z\u0184{\u0186\u0000\u0188\u0000\u018a|\u018c}\u018e"+ - "~\u0190\u0000\u0192\u0000\u0194\u0000\u0196\u0000\u0010\u0000\u0001\u0002"+ - "\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\r\u0006\u0000"+ - "\t\n\r\r //[[]]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r\r \u000b\u0000"+ - "\t\n\r\r \"\",,//::==[[]]||\u0002\u0000**//\u0001\u000009\u0002\u0000"+ - 
"AZaz\u0005\u0000\"\"\\\\nnrrtt\u0004\u0000\n\n\r\r\"\"\\\\\u0002\u0000"+ - "EEee\u0002\u0000++--\u0001\u0000``\u000b\u0000\t\n\r\r \"#,,//::<<>?"+ - "\\\\||\u05d6\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001\u0000"+ - "\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001\u0000"+ - "\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001\u0000"+ - "\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001\u0000"+ - "\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000\u0000"+ - "\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000\u0000"+ - "(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000,\u0001"+ - "\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000\u0000"+ - "\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000\u0000"+ - "6\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:\u0001"+ - "\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000\u0000"+ - "\u0000\u0000@\u0001\u0000\u0000\u0000\u0000D\u0001\u0000\u0000\u0000\u0001"+ - "F\u0001\u0000\u0000\u0000\u0001H\u0001\u0000\u0000\u0000\u0001J\u0001"+ - "\u0000\u0000\u0000\u0001L\u0001\u0000\u0000\u0000\u0001N\u0001\u0000\u0000"+ - "\u0000\u0002P\u0001\u0000\u0000\u0000\u0002f\u0001\u0000\u0000\u0000\u0002"+ - "h\u0001\u0000\u0000\u0000\u0002j\u0001\u0000\u0000\u0000\u0002l\u0001"+ - "\u0000\u0000\u0000\u0002n\u0001\u0000\u0000\u0000\u0002p\u0001\u0000\u0000"+ - "\u0000\u0002r\u0001\u0000\u0000\u0000\u0002t\u0001\u0000\u0000\u0000\u0002"+ - "v\u0001\u0000\u0000\u0000\u0002x\u0001\u0000\u0000\u0000\u0002z\u0001"+ - "\u0000\u0000\u0000\u0002|\u0001\u0000\u0000\u0000\u0002~\u0001\u0000\u0000"+ - "\u0000\u0002\u0080\u0001\u0000\u0000\u0000\u0002\u0082\u0001\u0000\u0000"+ - "\u0000\u0002\u0084\u0001\u0000\u0000\u0000\u0002\u0086\u0001\u0000\u0000"+ - "\u0000\u0002\u0088\u0001\u0000\u0000\u0000\u0002\u008a\u0001\u0000\u0000"+ - "\u0000\u0002\u008c\u0001\u0000\u0000\u0000\u0002\u008e\u0001\u0000\u0000"+ - "\u0000\u0002\u0090\u0001\u0000\u0000\u0000\u0002\u0092\u0001\u0000\u0000"+ - "\u0000\u0002\u0094\u0001\u0000\u0000\u0000\u0002\u0096\u0001\u0000\u0000"+ - "\u0000\u0002\u0098\u0001\u0000\u0000\u0000\u0002\u009a\u0001\u0000\u0000"+ - "\u0000\u0002\u009c\u0001\u0000\u0000\u0000\u0002\u009e\u0001\u0000\u0000"+ - "\u0000\u0002\u00a0\u0001\u0000\u0000\u0000\u0002\u00a2\u0001\u0000\u0000"+ - "\u0000\u0002\u00a4\u0001\u0000\u0000\u0000\u0002\u00a6\u0001\u0000\u0000"+ - "\u0000\u0002\u00a8\u0001\u0000\u0000\u0000\u0002\u00aa\u0001\u0000\u0000"+ - "\u0000\u0002\u00ac\u0001\u0000\u0000\u0000\u0002\u00ae\u0001\u0000\u0000"+ - "\u0000\u0002\u00b0\u0001\u0000\u0000\u0000\u0002\u00b2\u0001\u0000\u0000"+ - "\u0000\u0002\u00b4\u0001\u0000\u0000\u0000\u0002\u00b6\u0001\u0000\u0000"+ - "\u0000\u0002\u00b8\u0001\u0000\u0000\u0000\u0002\u00ba\u0001\u0000\u0000"+ - "\u0000\u0002\u00be\u0001\u0000\u0000\u0000\u0002\u00c0\u0001\u0000\u0000"+ - "\u0000\u0002\u00c2\u0001\u0000\u0000\u0000\u0002\u00c4\u0001\u0000\u0000"+ - "\u0000\u0003\u00c6\u0001\u0000\u0000\u0000\u0003\u00c8\u0001\u0000\u0000"+ - "\u0000\u0003\u00ca\u0001\u0000\u0000\u0000\u0003\u00cc\u0001\u0000\u0000"+ - "\u0000\u0003\u00ce\u0001\u0000\u0000\u0000\u0003\u00d0\u0001\u0000\u0000"+ - "\u0000\u0003\u00d2\u0001\u0000\u0000\u0000\u0003\u00d4\u0001\u0000\u0000"+ - "\u0000\u0003\u00d6\u0001\u0000\u0000\u0000\u0003\u00d8\u0001\u0000\u0000"+ - "\u0000\u0003\u00da\u0001\u0000\u0000\u0000\u0003\u00dc\u0001\u0000\u0000"+ - 
"\u0000\u0004\u00de\u0001\u0000\u0000\u0000\u0004\u00e0\u0001\u0000\u0000"+ - "\u0000\u0004\u00e2\u0001\u0000\u0000\u0000\u0004\u00e8\u0001\u0000\u0000"+ - "\u0000\u0004\u00ea\u0001\u0000\u0000\u0000\u0004\u00ec\u0001\u0000\u0000"+ - "\u0000\u0004\u00ee\u0001\u0000\u0000\u0000\u0005\u00f0\u0001\u0000\u0000"+ - "\u0000\u0005\u00f2\u0001\u0000\u0000\u0000\u0005\u00f4\u0001\u0000\u0000"+ - "\u0000\u0005\u00f6\u0001\u0000\u0000\u0000\u0005\u00f8\u0001\u0000\u0000"+ - "\u0000\u0005\u00fa\u0001\u0000\u0000\u0000\u0005\u00fc\u0001\u0000\u0000"+ - "\u0000\u0005\u00fe\u0001\u0000\u0000\u0000\u0005\u0100\u0001\u0000\u0000"+ - "\u0000\u0006\u0102\u0001\u0000\u0000\u0000\u0006\u0104\u0001\u0000\u0000"+ - "\u0000\u0006\u0106\u0001\u0000\u0000\u0000\u0006\u0108\u0001\u0000\u0000"+ - "\u0000\u0006\u010c\u0001\u0000\u0000\u0000\u0006\u010e\u0001\u0000\u0000"+ - "\u0000\u0006\u0110\u0001\u0000\u0000\u0000\u0006\u0112\u0001\u0000\u0000"+ - "\u0000\u0006\u0114\u0001\u0000\u0000\u0000\u0007\u0116\u0001\u0000\u0000"+ - "\u0000\u0007\u0118\u0001\u0000\u0000\u0000\u0007\u011a\u0001\u0000\u0000"+ - "\u0000\u0007\u011c\u0001\u0000\u0000\u0000\u0007\u011e\u0001\u0000\u0000"+ - "\u0000\u0007\u0120\u0001\u0000\u0000\u0000\u0007\u0122\u0001\u0000\u0000"+ - "\u0000\u0007\u0124\u0001\u0000\u0000\u0000\u0007\u0126\u0001\u0000\u0000"+ - "\u0000\u0007\u0128\u0001\u0000\u0000\u0000\b\u012a\u0001\u0000\u0000\u0000"+ - "\b\u012c\u0001\u0000\u0000\u0000\b\u012e\u0001\u0000\u0000\u0000\b\u0130"+ - "\u0001\u0000\u0000\u0000\b\u0132\u0001\u0000\u0000\u0000\b\u0134\u0001"+ - "\u0000\u0000\u0000\b\u0136\u0001\u0000\u0000\u0000\b\u0138\u0001\u0000"+ - "\u0000\u0000\b\u013a\u0001\u0000\u0000\u0000\b\u013c\u0001\u0000\u0000"+ - "\u0000\t\u013e\u0001\u0000\u0000\u0000\t\u0140\u0001\u0000\u0000\u0000"+ - "\t\u0142\u0001\u0000\u0000\u0000\t\u0144\u0001\u0000\u0000\u0000\t\u0146"+ - "\u0001\u0000\u0000\u0000\t\u0148\u0001\u0000\u0000\u0000\t\u014a\u0001"+ - "\u0000\u0000\u0000\n\u014c\u0001\u0000\u0000\u0000\n\u014e\u0001\u0000"+ - "\u0000\u0000\n\u0150\u0001\u0000\u0000\u0000\n\u0152\u0001\u0000\u0000"+ - "\u0000\n\u0154\u0001\u0000\u0000\u0000\n\u0156\u0001\u0000\u0000\u0000"+ - "\n\u0158\u0001\u0000\u0000\u0000\u000b\u015a\u0001\u0000\u0000\u0000\u000b"+ - "\u015c\u0001\u0000\u0000\u0000\u000b\u015e\u0001\u0000\u0000\u0000\u000b"+ - "\u0160\u0001\u0000\u0000\u0000\u000b\u0162\u0001\u0000\u0000\u0000\f\u0164"+ - "\u0001\u0000\u0000\u0000\f\u0166\u0001\u0000\u0000\u0000\f\u0168\u0001"+ - "\u0000\u0000\u0000\f\u016a\u0001\u0000\u0000\u0000\f\u016c\u0001\u0000"+ - "\u0000\u0000\r\u016e\u0001\u0000\u0000\u0000\r\u0170\u0001\u0000\u0000"+ - "\u0000\r\u0172\u0001\u0000\u0000\u0000\r\u0174\u0001\u0000\u0000\u0000"+ - "\r\u0176\u0001\u0000\u0000\u0000\r\u0178\u0001\u0000\u0000\u0000\u000e"+ - "\u017a\u0001\u0000\u0000\u0000\u000e\u017c\u0001\u0000\u0000\u0000\u000e"+ - "\u017e\u0001\u0000\u0000\u0000\u000e\u0180\u0001\u0000\u0000\u0000\u000e"+ - "\u0182\u0001\u0000\u0000\u0000\u000e\u0184\u0001\u0000\u0000\u0000\u000f"+ - "\u0186\u0001\u0000\u0000\u0000\u000f\u0188\u0001\u0000\u0000\u0000\u000f"+ - "\u018a\u0001\u0000\u0000\u0000\u000f\u018c\u0001\u0000\u0000\u0000\u000f"+ - "\u018e\u0001\u0000\u0000\u0000\u000f\u0190\u0001\u0000\u0000\u0000\u000f"+ - "\u0192\u0001\u0000\u0000\u0000\u000f\u0194\u0001\u0000\u0000\u0000\u000f"+ - "\u0196\u0001\u0000\u0000\u0000\u0010\u0198\u0001\u0000\u0000\u0000\u0012"+ - "\u01a2\u0001\u0000\u0000\u0000\u0014\u01a9\u0001\u0000\u0000\u0000\u0016"+ - 
"\u01b2\u0001\u0000\u0000\u0000\u0018\u01b9\u0001\u0000\u0000\u0000\u001a"+ - "\u01c3\u0001\u0000\u0000\u0000\u001c\u01ca\u0001\u0000\u0000\u0000\u001e"+ - "\u01d1\u0001\u0000\u0000\u0000 \u01df\u0001\u0000\u0000\u0000\"\u01e6"+ - "\u0001\u0000\u0000\u0000$\u01ee\u0001\u0000\u0000\u0000&\u01f7\u0001\u0000"+ - "\u0000\u0000(\u01fe\u0001\u0000\u0000\u0000*\u0208\u0001\u0000\u0000\u0000"+ - ",\u0214\u0001\u0000\u0000\u0000.\u021d\u0001\u0000\u0000\u00000\u0223"+ - "\u0001\u0000\u0000\u00002\u022a\u0001\u0000\u0000\u00004\u0231\u0001\u0000"+ - "\u0000\u00006\u0239\u0001\u0000\u0000\u00008\u0241\u0001\u0000\u0000\u0000"+ - ":\u024a\u0001\u0000\u0000\u0000<\u0250\u0001\u0000\u0000\u0000>\u0261"+ - "\u0001\u0000\u0000\u0000@\u0271\u0001\u0000\u0000\u0000B\u027a\u0001\u0000"+ - "\u0000\u0000D\u027d\u0001\u0000\u0000\u0000F\u0281\u0001\u0000\u0000\u0000"+ - "H\u0286\u0001\u0000\u0000\u0000J\u028b\u0001\u0000\u0000\u0000L\u028f"+ - "\u0001\u0000\u0000\u0000N\u0293\u0001\u0000\u0000\u0000P\u0297\u0001\u0000"+ - "\u0000\u0000R\u029b\u0001\u0000\u0000\u0000T\u029d\u0001\u0000\u0000\u0000"+ - "V\u029f\u0001\u0000\u0000\u0000X\u02a2\u0001\u0000\u0000\u0000Z\u02a4"+ - "\u0001\u0000\u0000\u0000\\\u02ad\u0001\u0000\u0000\u0000^\u02af\u0001"+ - "\u0000\u0000\u0000`\u02b4\u0001\u0000\u0000\u0000b\u02b6\u0001\u0000\u0000"+ - "\u0000d\u02bb\u0001\u0000\u0000\u0000f\u02da\u0001\u0000\u0000\u0000h"+ - "\u02dd\u0001\u0000\u0000\u0000j\u030b\u0001\u0000\u0000\u0000l\u030d\u0001"+ - "\u0000\u0000\u0000n\u0310\u0001\u0000\u0000\u0000p\u0314\u0001\u0000\u0000"+ - "\u0000r\u0318\u0001\u0000\u0000\u0000t\u031a\u0001\u0000\u0000\u0000v"+ - "\u031d\u0001\u0000\u0000\u0000x\u031f\u0001\u0000\u0000\u0000z\u0324\u0001"+ - "\u0000\u0000\u0000|\u0326\u0001\u0000\u0000\u0000~\u032c\u0001\u0000\u0000"+ - "\u0000\u0080\u0332\u0001\u0000\u0000\u0000\u0082\u0335\u0001\u0000\u0000"+ - "\u0000\u0084\u0338\u0001\u0000\u0000\u0000\u0086\u033d\u0001\u0000\u0000"+ - "\u0000\u0088\u0342\u0001\u0000\u0000\u0000\u008a\u0344\u0001\u0000\u0000"+ - "\u0000\u008c\u034a\u0001\u0000\u0000\u0000\u008e\u034e\u0001\u0000\u0000"+ - "\u0000\u0090\u0353\u0001\u0000\u0000\u0000\u0092\u0359\u0001\u0000\u0000"+ - "\u0000\u0094\u035c\u0001\u0000\u0000\u0000\u0096\u035e\u0001\u0000\u0000"+ - "\u0000\u0098\u0364\u0001\u0000\u0000\u0000\u009a\u0366\u0001\u0000\u0000"+ - "\u0000\u009c\u036b\u0001\u0000\u0000\u0000\u009e\u036e\u0001\u0000\u0000"+ - "\u0000\u00a0\u0371\u0001\u0000\u0000\u0000\u00a2\u0374\u0001\u0000\u0000"+ - "\u0000\u00a4\u0376\u0001\u0000\u0000\u0000\u00a6\u0379\u0001\u0000\u0000"+ - "\u0000\u00a8\u037b\u0001\u0000\u0000\u0000\u00aa\u037e\u0001\u0000\u0000"+ - "\u0000\u00ac\u0380\u0001\u0000\u0000\u0000\u00ae\u0382\u0001\u0000\u0000"+ - "\u0000\u00b0\u0384\u0001\u0000\u0000\u0000\u00b2\u0386\u0001\u0000\u0000"+ - "\u0000\u00b4\u0396\u0001\u0000\u0000\u0000\u00b6\u0398\u0001\u0000\u0000"+ - "\u0000\u00b8\u039d\u0001\u0000\u0000\u0000\u00ba\u03b2\u0001\u0000\u0000"+ - "\u0000\u00bc\u03b4\u0001\u0000\u0000\u0000\u00be\u03bc\u0001\u0000\u0000"+ - "\u0000\u00c0\u03be\u0001\u0000\u0000\u0000\u00c2\u03c2\u0001\u0000\u0000"+ - "\u0000\u00c4\u03c6\u0001\u0000\u0000\u0000\u00c6\u03ca\u0001\u0000\u0000"+ - "\u0000\u00c8\u03cf\u0001\u0000\u0000\u0000\u00ca\u03d3\u0001\u0000\u0000"+ - "\u0000\u00cc\u03d7\u0001\u0000\u0000\u0000\u00ce\u03db\u0001\u0000\u0000"+ - "\u0000\u00d0\u03df\u0001\u0000\u0000\u0000\u00d2\u03e3\u0001\u0000\u0000"+ - "\u0000\u00d4\u03ec\u0001\u0000\u0000\u0000\u00d6\u03f0\u0001\u0000\u0000"+ - 
"\u0000\u00d8\u03f4\u0001\u0000\u0000\u0000\u00da\u03f8\u0001\u0000\u0000"+ - "\u0000\u00dc\u03fc\u0001\u0000\u0000\u0000\u00de\u0400\u0001\u0000\u0000"+ - "\u0000\u00e0\u0405\u0001\u0000\u0000\u0000\u00e2\u0409\u0001\u0000\u0000"+ - "\u0000\u00e4\u0411\u0001\u0000\u0000\u0000\u00e6\u0426\u0001\u0000\u0000"+ - "\u0000\u00e8\u042a\u0001\u0000\u0000\u0000\u00ea\u042e\u0001\u0000\u0000"+ - "\u0000\u00ec\u0432\u0001\u0000\u0000\u0000\u00ee\u0436\u0001\u0000\u0000"+ - "\u0000\u00f0\u043a\u0001\u0000\u0000\u0000\u00f2\u043f\u0001\u0000\u0000"+ - "\u0000\u00f4\u0443\u0001\u0000\u0000\u0000\u00f6\u0447\u0001\u0000\u0000"+ - "\u0000\u00f8\u044b\u0001\u0000\u0000\u0000\u00fa\u044e\u0001\u0000\u0000"+ - "\u0000\u00fc\u0452\u0001\u0000\u0000\u0000\u00fe\u0456\u0001\u0000\u0000"+ - "\u0000\u0100\u045a\u0001\u0000\u0000\u0000\u0102\u045e\u0001\u0000\u0000"+ - "\u0000\u0104\u0463\u0001\u0000\u0000\u0000\u0106\u0468\u0001\u0000\u0000"+ - "\u0000\u0108\u046d\u0001\u0000\u0000\u0000\u010a\u0474\u0001\u0000\u0000"+ - "\u0000\u010c\u047d\u0001\u0000\u0000\u0000\u010e\u0484\u0001\u0000\u0000"+ - "\u0000\u0110\u0488\u0001\u0000\u0000\u0000\u0112\u048c\u0001\u0000\u0000"+ - "\u0000\u0114\u0490\u0001\u0000\u0000\u0000\u0116\u0494\u0001\u0000\u0000"+ - "\u0000\u0118\u049a\u0001\u0000\u0000\u0000\u011a\u049e\u0001\u0000\u0000"+ - "\u0000\u011c\u04a2\u0001\u0000\u0000\u0000\u011e\u04a6\u0001\u0000\u0000"+ - "\u0000\u0120\u04aa\u0001\u0000\u0000\u0000\u0122\u04ae\u0001\u0000\u0000"+ - "\u0000\u0124\u04b2\u0001\u0000\u0000\u0000\u0126\u04b6\u0001\u0000\u0000"+ - "\u0000\u0128\u04ba\u0001\u0000\u0000\u0000\u012a\u04be\u0001\u0000\u0000"+ - "\u0000\u012c\u04c3\u0001\u0000\u0000\u0000\u012e\u04c7\u0001\u0000\u0000"+ - "\u0000\u0130\u04cb\u0001\u0000\u0000\u0000\u0132\u04cf\u0001\u0000\u0000"+ - "\u0000\u0134\u04d4\u0001\u0000\u0000\u0000\u0136\u04d8\u0001\u0000\u0000"+ - "\u0000\u0138\u04dc\u0001\u0000\u0000\u0000\u013a\u04e0\u0001\u0000\u0000"+ - "\u0000\u013c\u04e4\u0001\u0000\u0000\u0000\u013e\u04e8\u0001\u0000\u0000"+ - "\u0000\u0140\u04ee\u0001\u0000\u0000\u0000\u0142\u04f2\u0001\u0000\u0000"+ - "\u0000\u0144\u04f6\u0001\u0000\u0000\u0000\u0146\u04fa\u0001\u0000\u0000"+ - "\u0000\u0148\u04fe\u0001\u0000\u0000\u0000\u014a\u0502\u0001\u0000\u0000"+ - "\u0000\u014c\u0506\u0001\u0000\u0000\u0000\u014e\u050b\u0001\u0000\u0000"+ - "\u0000\u0150\u050f\u0001\u0000\u0000\u0000\u0152\u0513\u0001\u0000\u0000"+ - "\u0000\u0154\u0517\u0001\u0000\u0000\u0000\u0156\u051b\u0001\u0000\u0000"+ - "\u0000\u0158\u051f\u0001\u0000\u0000\u0000\u015a\u0523\u0001\u0000\u0000"+ - "\u0000\u015c\u0528\u0001\u0000\u0000\u0000\u015e\u052d\u0001\u0000\u0000"+ - "\u0000\u0160\u0531\u0001\u0000\u0000\u0000\u0162\u0535\u0001\u0000\u0000"+ - "\u0000\u0164\u0539\u0001\u0000\u0000\u0000\u0166\u053e\u0001\u0000\u0000"+ - "\u0000\u0168\u0548\u0001\u0000\u0000\u0000\u016a\u054c\u0001\u0000\u0000"+ - "\u0000\u016c\u0550\u0001\u0000\u0000\u0000\u016e\u0554\u0001\u0000\u0000"+ - "\u0000\u0170\u0559\u0001\u0000\u0000\u0000\u0172\u0560\u0001\u0000\u0000"+ - "\u0000\u0174\u0564\u0001\u0000\u0000\u0000\u0176\u0568\u0001\u0000\u0000"+ - "\u0000\u0178\u056c\u0001\u0000\u0000\u0000\u017a\u0570\u0001\u0000\u0000"+ - "\u0000\u017c\u0575\u0001\u0000\u0000\u0000\u017e\u057b\u0001\u0000\u0000"+ - "\u0000\u0180\u0581\u0001\u0000\u0000\u0000\u0182\u0585\u0001\u0000\u0000"+ - "\u0000\u0184\u0589\u0001\u0000\u0000\u0000\u0186\u058d\u0001\u0000\u0000"+ - "\u0000\u0188\u0593\u0001\u0000\u0000\u0000\u018a\u0599\u0001\u0000\u0000"+ - 
"\u0000\u018c\u059d\u0001\u0000\u0000\u0000\u018e\u05a1\u0001\u0000\u0000"+ - "\u0000\u0190\u05a5\u0001\u0000\u0000\u0000\u0192\u05ab\u0001\u0000\u0000"+ - "\u0000\u0194\u05b1\u0001\u0000\u0000\u0000\u0196\u05b7\u0001\u0000\u0000"+ - "\u0000\u0198\u0199\u0005d\u0000\u0000\u0199\u019a\u0005i\u0000\u0000\u019a"+ - "\u019b\u0005s\u0000\u0000\u019b\u019c\u0005s\u0000\u0000\u019c\u019d\u0005"+ - "e\u0000\u0000\u019d\u019e\u0005c\u0000\u0000\u019e\u019f\u0005t\u0000"+ - "\u0000\u019f\u01a0\u0001\u0000\u0000\u0000\u01a0\u01a1\u0006\u0000\u0000"+ - "\u0000\u01a1\u0011\u0001\u0000\u0000\u0000\u01a2\u01a3\u0005d\u0000\u0000"+ - "\u01a3\u01a4\u0005r\u0000\u0000\u01a4\u01a5\u0005o\u0000\u0000\u01a5\u01a6"+ - "\u0005p\u0000\u0000\u01a6\u01a7\u0001\u0000\u0000\u0000\u01a7\u01a8\u0006"+ - "\u0001\u0001\u0000\u01a8\u0013\u0001\u0000\u0000\u0000\u01a9\u01aa\u0005"+ - "e\u0000\u0000\u01aa\u01ab\u0005n\u0000\u0000\u01ab\u01ac\u0005r\u0000"+ - "\u0000\u01ac\u01ad\u0005i\u0000\u0000\u01ad\u01ae\u0005c\u0000\u0000\u01ae"+ - "\u01af\u0005h\u0000\u0000\u01af\u01b0\u0001\u0000\u0000\u0000\u01b0\u01b1"+ - "\u0006\u0002\u0002\u0000\u01b1\u0015\u0001\u0000\u0000\u0000\u01b2\u01b3"+ - "\u0005e\u0000\u0000\u01b3\u01b4\u0005v\u0000\u0000\u01b4\u01b5\u0005a"+ - "\u0000\u0000\u01b5\u01b6\u0005l\u0000\u0000\u01b6\u01b7\u0001\u0000\u0000"+ - "\u0000\u01b7\u01b8\u0006\u0003\u0000\u0000\u01b8\u0017\u0001\u0000\u0000"+ - "\u0000\u01b9\u01ba\u0005e\u0000\u0000\u01ba\u01bb\u0005x\u0000\u0000\u01bb"+ - "\u01bc\u0005p\u0000\u0000\u01bc\u01bd\u0005l\u0000\u0000\u01bd\u01be\u0005"+ - "a\u0000\u0000\u01be\u01bf\u0005i\u0000\u0000\u01bf\u01c0\u0005n\u0000"+ - "\u0000\u01c0\u01c1\u0001\u0000\u0000\u0000\u01c1\u01c2\u0006\u0004\u0003"+ - "\u0000\u01c2\u0019\u0001\u0000\u0000\u0000\u01c3\u01c4\u0005f\u0000\u0000"+ - "\u01c4\u01c5\u0005r\u0000\u0000\u01c5\u01c6\u0005o\u0000\u0000\u01c6\u01c7"+ - "\u0005m\u0000\u0000\u01c7\u01c8\u0001\u0000\u0000\u0000\u01c8\u01c9\u0006"+ - "\u0005\u0004\u0000\u01c9\u001b\u0001\u0000\u0000\u0000\u01ca\u01cb\u0005"+ - "g\u0000\u0000\u01cb\u01cc\u0005r\u0000\u0000\u01cc\u01cd\u0005o\u0000"+ - "\u0000\u01cd\u01ce\u0005k\u0000\u0000\u01ce\u01cf\u0001\u0000\u0000\u0000"+ - "\u01cf\u01d0\u0006\u0006\u0000\u0000\u01d0\u001d\u0001\u0000\u0000\u0000"+ - "\u01d1\u01d2\u0005i\u0000\u0000\u01d2\u01d3\u0005n\u0000\u0000\u01d3\u01d4"+ - "\u0005l\u0000\u0000\u01d4\u01d5\u0005i\u0000\u0000\u01d5\u01d6\u0005n"+ - "\u0000\u0000\u01d6\u01d7\u0005e\u0000\u0000\u01d7\u01d8\u0005s\u0000\u0000"+ - "\u01d8\u01d9\u0005t\u0000\u0000\u01d9\u01da\u0005a\u0000\u0000\u01da\u01db"+ - "\u0005t\u0000\u0000\u01db\u01dc\u0005s\u0000\u0000\u01dc\u01dd\u0001\u0000"+ - "\u0000\u0000\u01dd\u01de\u0006\u0007\u0000\u0000\u01de\u001f\u0001\u0000"+ - "\u0000\u0000\u01df\u01e0\u0005k\u0000\u0000\u01e0\u01e1\u0005e\u0000\u0000"+ - "\u01e1\u01e2\u0005e\u0000\u0000\u01e2\u01e3\u0005p\u0000\u0000\u01e3\u01e4"+ - "\u0001\u0000\u0000\u0000\u01e4\u01e5\u0006\b\u0001\u0000\u01e5!\u0001"+ - "\u0000\u0000\u0000\u01e6\u01e7\u0005l\u0000\u0000\u01e7\u01e8\u0005i\u0000"+ - "\u0000\u01e8\u01e9\u0005m\u0000\u0000\u01e9\u01ea\u0005i\u0000\u0000\u01ea"+ - "\u01eb\u0005t\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000\u0000\u01ec\u01ed"+ - "\u0006\t\u0000\u0000\u01ed#\u0001\u0000\u0000\u0000\u01ee\u01ef\u0005"+ - "l\u0000\u0000\u01ef\u01f0\u0005o\u0000\u0000\u01f0\u01f1\u0005o\u0000"+ - "\u0000\u01f1\u01f2\u0005k\u0000\u0000\u01f2\u01f3\u0005u\u0000\u0000\u01f3"+ - "\u01f4\u0005p\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000\u0000\u01f5\u01f6"+ - 
"\u0006\n\u0005\u0000\u01f6%\u0001\u0000\u0000\u0000\u01f7\u01f8\u0005"+ - "m\u0000\u0000\u01f8\u01f9\u0005e\u0000\u0000\u01f9\u01fa\u0005t\u0000"+ - "\u0000\u01fa\u01fb\u0005a\u0000\u0000\u01fb\u01fc\u0001\u0000\u0000\u0000"+ - "\u01fc\u01fd\u0006\u000b\u0006\u0000\u01fd\'\u0001\u0000\u0000\u0000\u01fe"+ - "\u01ff\u0005m\u0000\u0000\u01ff\u0200\u0005e\u0000\u0000\u0200\u0201\u0005"+ - "t\u0000\u0000\u0201\u0202\u0005r\u0000\u0000\u0202\u0203\u0005i\u0000"+ - "\u0000\u0203\u0204\u0005c\u0000\u0000\u0204\u0205\u0005s\u0000\u0000\u0205"+ - "\u0206\u0001\u0000\u0000\u0000\u0206\u0207\u0006\f\u0007\u0000\u0207)"+ - "\u0001\u0000\u0000\u0000\u0208\u0209\u0005m\u0000\u0000\u0209\u020a\u0005"+ - "v\u0000\u0000\u020a\u020b\u0005_\u0000\u0000\u020b\u020c\u0005e\u0000"+ - "\u0000\u020c\u020d\u0005x\u0000\u0000\u020d\u020e\u0005p\u0000\u0000\u020e"+ - "\u020f\u0005a\u0000\u0000\u020f\u0210\u0005n\u0000\u0000\u0210\u0211\u0005"+ - "d\u0000\u0000\u0211\u0212\u0001\u0000\u0000\u0000\u0212\u0213\u0006\r"+ - "\b\u0000\u0213+\u0001\u0000\u0000\u0000\u0214\u0215\u0005r\u0000\u0000"+ + "R\u0003R\u038c\bR\u0001R\u0005R\u038f\bR\nR\fR\u0392\tR\u0001R\u0001R"+ + "\u0004R\u0396\bR\u000bR\fR\u0397\u0003R\u039a\bR\u0001S\u0001S\u0001S"+ + "\u0001S\u0001S\u0001T\u0001T\u0001T\u0001T\u0001T\u0001U\u0001U\u0005"+ + "U\u03a8\bU\nU\fU\u03ab\tU\u0001U\u0001U\u0003U\u03af\bU\u0001U\u0004U"+ + "\u03b2\bU\u000bU\fU\u03b3\u0003U\u03b6\bU\u0001V\u0001V\u0004V\u03ba\b"+ + "V\u000bV\fV\u03bb\u0001V\u0001V\u0001W\u0001W\u0001X\u0001X\u0001X\u0001"+ + "X\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001"+ + "[\u0001[\u0001[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001"+ + "]\u0001]\u0001^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001"+ + "`\u0001`\u0001`\u0001`\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001"+ + "a\u0001a\u0001a\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001"+ + "c\u0001d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001e\u0001f\u0001"+ + "f\u0001f\u0001f\u0001g\u0001g\u0001g\u0001g\u0001g\u0001h\u0001h\u0001"+ + "h\u0001h\u0001i\u0001i\u0001i\u0001i\u0001j\u0001j\u0001j\u0001j\u0003"+ + "j\u0415\bj\u0001k\u0001k\u0003k\u0419\bk\u0001k\u0005k\u041c\bk\nk\fk"+ + "\u041f\tk\u0001k\u0001k\u0003k\u0423\bk\u0001k\u0004k\u0426\bk\u000bk"+ + "\fk\u0427\u0003k\u042a\bk\u0001l\u0001l\u0004l\u042e\bl\u000bl\fl\u042f"+ + "\u0001m\u0001m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001o\u0001"+ + "o\u0001o\u0001o\u0001p\u0001p\u0001p\u0001p\u0001p\u0001q\u0001q\u0001"+ + "q\u0001q\u0001r\u0001r\u0001r\u0001r\u0001s\u0001s\u0001s\u0001s\u0001"+ + "t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001"+ + "v\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001y\u0001"+ + "y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001z\u0001z\u0001z\u0001{\u0001"+ + "{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001|\u0001|\u0001"+ + "|\u0001}\u0001}\u0001~\u0004~\u047b\b~\u000b~\f~\u047c\u0001~\u0001~\u0003"+ + "~\u0481\b~\u0001~\u0004~\u0484\b~\u000b~\f~\u0485\u0001\u007f\u0001\u007f"+ + "\u0001\u007f\u0001\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080"+ + "\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0082\u0001\u0082"+ + "\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083"+ + "\u0001\u0083\u0001\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084"+ + "\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0086\u0001\u0086"+ + "\u0001\u0086\u0001\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087"+ + 
"\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089"+ + "\u0001\u0089\u0001\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a"+ + "\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008c\u0001\u008c"+ + "\u0001\u008c\u0001\u008c\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d"+ + "\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008f"+ + "\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0001\u0090"+ + "\u0001\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091"+ + "\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001\u0093"+ + "\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094"+ + "\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0096\u0001\u0096"+ + "\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097"+ + "\u0001\u0097\u0001\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098"+ + "\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u009a\u0001\u009a"+ + "\u0001\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009b"+ + "\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d"+ + "\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e"+ + "\u0001\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0"+ + "\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1"+ + "\u0001\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3"+ + "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4"+ + "\u0001\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5"+ + "\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7"+ + "\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa"+ + "\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac"+ + "\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae"+ + "\u0001\u00ae\u0001\u00ae\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af"+ + "\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1"+ + "\u0001\u00b1\u0001\u00b1\u0004\u00b1\u0564\b\u00b1\u000b\u00b1\f\u00b1"+ + "\u0565\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001"+ + "\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001"+ + "\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001"+ + "\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001"+ + "\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001"+ + "\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001"+ + "\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001"+ + "\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001"+ + "\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001\u00be\u0001"+ + "\u00be\u0001\u00be\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001"+ + "\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001"+ + "\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001"+ + "\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001"+ + 
"\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0002\u0268\u02cd"+ + "\u0000\u00c4\u0010\u0001\u0012\u0002\u0014\u0003\u0016\u0004\u0018\u0005"+ + "\u001a\u0006\u001c\u0007\u001e\b \t\"\n$\u000b&\f(\r*\u000e,\u000f.\u0010"+ + "0\u00112\u00124\u00136\u00148\u0015:\u0016<\u0017>\u0018@\u0019B\u0000"+ + "D\u001aF\u0000H\u0000J\u001bL\u001cN\u001dP\u001eR\u0000T\u0000V\u0000"+ + "X\u0000Z\u0000\\\u0000^\u0000`\u0000b\u0000d\u0000f\u001fh j!l\"n#p$r"+ + "%t&v\'x(z)|*~+\u0080,\u0082-\u0084.\u0086/\u00880\u008a1\u008c2\u008e"+ + "3\u00904\u00925\u00946\u00967\u00988\u009a9\u009c:\u009e;\u00a0<\u00a2"+ + "=\u00a4>\u00a6?\u00a8@\u00aaA\u00acB\u00aeC\u00b0D\u00b2E\u00b4F\u00b6"+ + "G\u00b8H\u00baI\u00bc\u0000\u00beJ\u00c0K\u00c2L\u00c4M\u00c6\u0000\u00c8"+ + "\u0000\u00ca\u0000\u00cc\u0000\u00ce\u0000\u00d0\u0000\u00d2N\u00d4\u0000"+ + "\u00d6\u0000\u00d8O\u00daP\u00dcQ\u00de\u0000\u00e0\u0000\u00e2\u0000"+ + "\u00e4\u0000\u00e6\u0000\u00e8R\u00eaS\u00ecT\u00eeU\u00f0\u0000\u00f2"+ + "\u0000\u00f4\u0000\u00f6\u0000\u00f8V\u00fa\u0000\u00fcW\u00feX\u0100"+ + "Y\u0102\u0000\u0104\u0000\u0106Z\u0108[\u010a\u0000\u010c\\\u010e\u0000"+ + "\u0110]\u0112^\u0114_\u0116\u0000\u0118\u0000\u011a\u0000\u011c\u0000"+ + "\u011e\u0000\u0120\u0000\u0122\u0000\u0124`\u0126a\u0128b\u012a\u0000"+ + "\u012c\u0000\u012e\u0000\u0130\u0000\u0132\u0000\u0134\u0000\u0136\u0000"+ + "\u0138c\u013ad\u013ce\u013e\u0000\u0140\u0000\u0142\u0000\u0144\u0000"+ + "\u0146f\u0148g\u014ah\u014c\u0000\u014e\u0000\u0150\u0000\u0152\u0000"+ + "\u0154i\u0156j\u0158k\u015a\u0000\u015cl\u015em\u0160n\u0162o\u0164\u0000"+ + "\u0166p\u0168q\u016ar\u016cs\u016e\u0000\u0170t\u0172u\u0174v\u0176w\u0178"+ + "x\u017a\u0000\u017c\u0000\u017e\u0000\u0180y\u0182z\u0184{\u0186\u0000"+ + "\u0188\u0000\u018a|\u018c}\u018e~\u0190\u0000\u0192\u0000\u0194\u0000"+ + "\u0196\u0000\u0010\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t"+ + "\n\u000b\f\r\u000e\u000f\r\u0006\u0000\t\n\r\r //[[]]\u0002\u0000\n\n"+ + "\r\r\u0003\u0000\t\n\r\r \u000b\u0000\t\n\r\r \"\",,//::==[[]]||\u0002"+ + "\u0000**//\u0001\u000009\u0002\u0000AZaz\u0005\u0000\"\"\\\\nnrrtt\u0004"+ + "\u0000\n\n\r\r\"\"\\\\\u0002\u0000EEee\u0002\u0000++--\u0001\u0000``\u000b"+ + "\u0000\t\n\r\r \"#,,//::<<>?\\\\||\u05da\u0000\u0010\u0001\u0000\u0000"+ + "\u0000\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000"+ + "\u0000\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000"+ + "\u0000\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000"+ + "\u0000\u0000\u001e\u0001\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000"+ + "\u0000\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000"+ + "&\u0001\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001"+ + "\u0000\u0000\u0000\u0000,\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000"+ + "\u0000\u00000\u0001\u0000\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u0000"+ + "4\u0001\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000\u00008\u0001"+ + "\u0000\u0000\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000"+ + "\u0000\u0000>\u0001\u0000\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000"+ + "D\u0001\u0000\u0000\u0000\u0001F\u0001\u0000\u0000\u0000\u0001H\u0001"+ + "\u0000\u0000\u0000\u0001J\u0001\u0000\u0000\u0000\u0001L\u0001\u0000\u0000"+ + "\u0000\u0001N\u0001\u0000\u0000\u0000\u0002P\u0001\u0000\u0000\u0000\u0002"+ + "f\u0001\u0000\u0000\u0000\u0002h\u0001\u0000\u0000\u0000\u0002j\u0001"+ + "\u0000\u0000\u0000\u0002l\u0001\u0000\u0000\u0000\u0002n\u0001\u0000\u0000"+ + 
"\u0000\u0002p\u0001\u0000\u0000\u0000\u0002r\u0001\u0000\u0000\u0000\u0002"+ + "t\u0001\u0000\u0000\u0000\u0002v\u0001\u0000\u0000\u0000\u0002x\u0001"+ + "\u0000\u0000\u0000\u0002z\u0001\u0000\u0000\u0000\u0002|\u0001\u0000\u0000"+ + "\u0000\u0002~\u0001\u0000\u0000\u0000\u0002\u0080\u0001\u0000\u0000\u0000"+ + "\u0002\u0082\u0001\u0000\u0000\u0000\u0002\u0084\u0001\u0000\u0000\u0000"+ + "\u0002\u0086\u0001\u0000\u0000\u0000\u0002\u0088\u0001\u0000\u0000\u0000"+ + "\u0002\u008a\u0001\u0000\u0000\u0000\u0002\u008c\u0001\u0000\u0000\u0000"+ + "\u0002\u008e\u0001\u0000\u0000\u0000\u0002\u0090\u0001\u0000\u0000\u0000"+ + "\u0002\u0092\u0001\u0000\u0000\u0000\u0002\u0094\u0001\u0000\u0000\u0000"+ + "\u0002\u0096\u0001\u0000\u0000\u0000\u0002\u0098\u0001\u0000\u0000\u0000"+ + "\u0002\u009a\u0001\u0000\u0000\u0000\u0002\u009c\u0001\u0000\u0000\u0000"+ + "\u0002\u009e\u0001\u0000\u0000\u0000\u0002\u00a0\u0001\u0000\u0000\u0000"+ + "\u0002\u00a2\u0001\u0000\u0000\u0000\u0002\u00a4\u0001\u0000\u0000\u0000"+ + "\u0002\u00a6\u0001\u0000\u0000\u0000\u0002\u00a8\u0001\u0000\u0000\u0000"+ + "\u0002\u00aa\u0001\u0000\u0000\u0000\u0002\u00ac\u0001\u0000\u0000\u0000"+ + "\u0002\u00ae\u0001\u0000\u0000\u0000\u0002\u00b0\u0001\u0000\u0000\u0000"+ + "\u0002\u00b2\u0001\u0000\u0000\u0000\u0002\u00b4\u0001\u0000\u0000\u0000"+ + "\u0002\u00b6\u0001\u0000\u0000\u0000\u0002\u00b8\u0001\u0000\u0000\u0000"+ + "\u0002\u00ba\u0001\u0000\u0000\u0000\u0002\u00be\u0001\u0000\u0000\u0000"+ + "\u0002\u00c0\u0001\u0000\u0000\u0000\u0002\u00c2\u0001\u0000\u0000\u0000"+ + "\u0002\u00c4\u0001\u0000\u0000\u0000\u0003\u00c6\u0001\u0000\u0000\u0000"+ + "\u0003\u00c8\u0001\u0000\u0000\u0000\u0003\u00ca\u0001\u0000\u0000\u0000"+ + "\u0003\u00cc\u0001\u0000\u0000\u0000\u0003\u00ce\u0001\u0000\u0000\u0000"+ + "\u0003\u00d0\u0001\u0000\u0000\u0000\u0003\u00d2\u0001\u0000\u0000\u0000"+ + "\u0003\u00d4\u0001\u0000\u0000\u0000\u0003\u00d6\u0001\u0000\u0000\u0000"+ + "\u0003\u00d8\u0001\u0000\u0000\u0000\u0003\u00da\u0001\u0000\u0000\u0000"+ + "\u0003\u00dc\u0001\u0000\u0000\u0000\u0004\u00de\u0001\u0000\u0000\u0000"+ + "\u0004\u00e0\u0001\u0000\u0000\u0000\u0004\u00e2\u0001\u0000\u0000\u0000"+ + "\u0004\u00e8\u0001\u0000\u0000\u0000\u0004\u00ea\u0001\u0000\u0000\u0000"+ + "\u0004\u00ec\u0001\u0000\u0000\u0000\u0004\u00ee\u0001\u0000\u0000\u0000"+ + "\u0005\u00f0\u0001\u0000\u0000\u0000\u0005\u00f2\u0001\u0000\u0000\u0000"+ + "\u0005\u00f4\u0001\u0000\u0000\u0000\u0005\u00f6\u0001\u0000\u0000\u0000"+ + "\u0005\u00f8\u0001\u0000\u0000\u0000\u0005\u00fa\u0001\u0000\u0000\u0000"+ + "\u0005\u00fc\u0001\u0000\u0000\u0000\u0005\u00fe\u0001\u0000\u0000\u0000"+ + "\u0005\u0100\u0001\u0000\u0000\u0000\u0006\u0102\u0001\u0000\u0000\u0000"+ + "\u0006\u0104\u0001\u0000\u0000\u0000\u0006\u0106\u0001\u0000\u0000\u0000"+ + "\u0006\u0108\u0001\u0000\u0000\u0000\u0006\u010c\u0001\u0000\u0000\u0000"+ + "\u0006\u010e\u0001\u0000\u0000\u0000\u0006\u0110\u0001\u0000\u0000\u0000"+ + "\u0006\u0112\u0001\u0000\u0000\u0000\u0006\u0114\u0001\u0000\u0000\u0000"+ + "\u0007\u0116\u0001\u0000\u0000\u0000\u0007\u0118\u0001\u0000\u0000\u0000"+ + "\u0007\u011a\u0001\u0000\u0000\u0000\u0007\u011c\u0001\u0000\u0000\u0000"+ + "\u0007\u011e\u0001\u0000\u0000\u0000\u0007\u0120\u0001\u0000\u0000\u0000"+ + "\u0007\u0122\u0001\u0000\u0000\u0000\u0007\u0124\u0001\u0000\u0000\u0000"+ + "\u0007\u0126\u0001\u0000\u0000\u0000\u0007\u0128\u0001\u0000\u0000\u0000"+ + "\b\u012a\u0001\u0000\u0000\u0000\b\u012c\u0001\u0000\u0000\u0000\b\u012e"+ + 
"\u0001\u0000\u0000\u0000\b\u0130\u0001\u0000\u0000\u0000\b\u0132\u0001"+ + "\u0000\u0000\u0000\b\u0134\u0001\u0000\u0000\u0000\b\u0136\u0001\u0000"+ + "\u0000\u0000\b\u0138\u0001\u0000\u0000\u0000\b\u013a\u0001\u0000\u0000"+ + "\u0000\b\u013c\u0001\u0000\u0000\u0000\t\u013e\u0001\u0000\u0000\u0000"+ + "\t\u0140\u0001\u0000\u0000\u0000\t\u0142\u0001\u0000\u0000\u0000\t\u0144"+ + "\u0001\u0000\u0000\u0000\t\u0146\u0001\u0000\u0000\u0000\t\u0148\u0001"+ + "\u0000\u0000\u0000\t\u014a\u0001\u0000\u0000\u0000\n\u014c\u0001\u0000"+ + "\u0000\u0000\n\u014e\u0001\u0000\u0000\u0000\n\u0150\u0001\u0000\u0000"+ + "\u0000\n\u0152\u0001\u0000\u0000\u0000\n\u0154\u0001\u0000\u0000\u0000"+ + "\n\u0156\u0001\u0000\u0000\u0000\n\u0158\u0001\u0000\u0000\u0000\u000b"+ + "\u015a\u0001\u0000\u0000\u0000\u000b\u015c\u0001\u0000\u0000\u0000\u000b"+ + "\u015e\u0001\u0000\u0000\u0000\u000b\u0160\u0001\u0000\u0000\u0000\u000b"+ + "\u0162\u0001\u0000\u0000\u0000\f\u0164\u0001\u0000\u0000\u0000\f\u0166"+ + "\u0001\u0000\u0000\u0000\f\u0168\u0001\u0000\u0000\u0000\f\u016a\u0001"+ + "\u0000\u0000\u0000\f\u016c\u0001\u0000\u0000\u0000\r\u016e\u0001\u0000"+ + "\u0000\u0000\r\u0170\u0001\u0000\u0000\u0000\r\u0172\u0001\u0000\u0000"+ + "\u0000\r\u0174\u0001\u0000\u0000\u0000\r\u0176\u0001\u0000\u0000\u0000"+ + "\r\u0178\u0001\u0000\u0000\u0000\u000e\u017a\u0001\u0000\u0000\u0000\u000e"+ + "\u017c\u0001\u0000\u0000\u0000\u000e\u017e\u0001\u0000\u0000\u0000\u000e"+ + "\u0180\u0001\u0000\u0000\u0000\u000e\u0182\u0001\u0000\u0000\u0000\u000e"+ + "\u0184\u0001\u0000\u0000\u0000\u000f\u0186\u0001\u0000\u0000\u0000\u000f"+ + "\u0188\u0001\u0000\u0000\u0000\u000f\u018a\u0001\u0000\u0000\u0000\u000f"+ + "\u018c\u0001\u0000\u0000\u0000\u000f\u018e\u0001\u0000\u0000\u0000\u000f"+ + "\u0190\u0001\u0000\u0000\u0000\u000f\u0192\u0001\u0000\u0000\u0000\u000f"+ + "\u0194\u0001\u0000\u0000\u0000\u000f\u0196\u0001\u0000\u0000\u0000\u0010"+ + "\u0198\u0001\u0000\u0000\u0000\u0012\u01a2\u0001\u0000\u0000\u0000\u0014"+ + "\u01a9\u0001\u0000\u0000\u0000\u0016\u01b2\u0001\u0000\u0000\u0000\u0018"+ + "\u01b9\u0001\u0000\u0000\u0000\u001a\u01c3\u0001\u0000\u0000\u0000\u001c"+ + "\u01ca\u0001\u0000\u0000\u0000\u001e\u01d1\u0001\u0000\u0000\u0000 \u01df"+ + "\u0001\u0000\u0000\u0000\"\u01e6\u0001\u0000\u0000\u0000$\u01ee\u0001"+ + "\u0000\u0000\u0000&\u01f7\u0001\u0000\u0000\u0000(\u01fe\u0001\u0000\u0000"+ + "\u0000*\u0208\u0001\u0000\u0000\u0000,\u0214\u0001\u0000\u0000\u0000."+ + "\u021d\u0001\u0000\u0000\u00000\u0223\u0001\u0000\u0000\u00002\u022a\u0001"+ + "\u0000\u0000\u00004\u0231\u0001\u0000\u0000\u00006\u0239\u0001\u0000\u0000"+ + "\u00008\u0241\u0001\u0000\u0000\u0000:\u024a\u0001\u0000\u0000\u0000<"+ + "\u0250\u0001\u0000\u0000\u0000>\u0261\u0001\u0000\u0000\u0000@\u0271\u0001"+ + "\u0000\u0000\u0000B\u027a\u0001\u0000\u0000\u0000D\u027d\u0001\u0000\u0000"+ + "\u0000F\u0281\u0001\u0000\u0000\u0000H\u0286\u0001\u0000\u0000\u0000J"+ + "\u028b\u0001\u0000\u0000\u0000L\u028f\u0001\u0000\u0000\u0000N\u0293\u0001"+ + "\u0000\u0000\u0000P\u0297\u0001\u0000\u0000\u0000R\u029b\u0001\u0000\u0000"+ + "\u0000T\u029d\u0001\u0000\u0000\u0000V\u029f\u0001\u0000\u0000\u0000X"+ + "\u02a2\u0001\u0000\u0000\u0000Z\u02a4\u0001\u0000\u0000\u0000\\\u02ad"+ + "\u0001\u0000\u0000\u0000^\u02af\u0001\u0000\u0000\u0000`\u02b4\u0001\u0000"+ + "\u0000\u0000b\u02b6\u0001\u0000\u0000\u0000d\u02bb\u0001\u0000\u0000\u0000"+ + "f\u02da\u0001\u0000\u0000\u0000h\u02dd\u0001\u0000\u0000\u0000j\u030b"+ + 
"\u0001\u0000\u0000\u0000l\u030d\u0001\u0000\u0000\u0000n\u0310\u0001\u0000"+ + "\u0000\u0000p\u0314\u0001\u0000\u0000\u0000r\u0318\u0001\u0000\u0000\u0000"+ + "t\u031a\u0001\u0000\u0000\u0000v\u031d\u0001\u0000\u0000\u0000x\u031f"+ + "\u0001\u0000\u0000\u0000z\u0324\u0001\u0000\u0000\u0000|\u0326\u0001\u0000"+ + "\u0000\u0000~\u032c\u0001\u0000\u0000\u0000\u0080\u0332\u0001\u0000\u0000"+ + "\u0000\u0082\u0335\u0001\u0000\u0000\u0000\u0084\u0338\u0001\u0000\u0000"+ + "\u0000\u0086\u033d\u0001\u0000\u0000\u0000\u0088\u0342\u0001\u0000\u0000"+ + "\u0000\u008a\u0344\u0001\u0000\u0000\u0000\u008c\u034a\u0001\u0000\u0000"+ + "\u0000\u008e\u034e\u0001\u0000\u0000\u0000\u0090\u0353\u0001\u0000\u0000"+ + "\u0000\u0092\u0359\u0001\u0000\u0000\u0000\u0094\u035c\u0001\u0000\u0000"+ + "\u0000\u0096\u035e\u0001\u0000\u0000\u0000\u0098\u0364\u0001\u0000\u0000"+ + "\u0000\u009a\u0366\u0001\u0000\u0000\u0000\u009c\u036b\u0001\u0000\u0000"+ + "\u0000\u009e\u036e\u0001\u0000\u0000\u0000\u00a0\u0371\u0001\u0000\u0000"+ + "\u0000\u00a2\u0374\u0001\u0000\u0000\u0000\u00a4\u0376\u0001\u0000\u0000"+ + "\u0000\u00a6\u0379\u0001\u0000\u0000\u0000\u00a8\u037b\u0001\u0000\u0000"+ + "\u0000\u00aa\u037e\u0001\u0000\u0000\u0000\u00ac\u0380\u0001\u0000\u0000"+ + "\u0000\u00ae\u0382\u0001\u0000\u0000\u0000\u00b0\u0384\u0001\u0000\u0000"+ + "\u0000\u00b2\u0386\u0001\u0000\u0000\u0000\u00b4\u0399\u0001\u0000\u0000"+ + "\u0000\u00b6\u039b\u0001\u0000\u0000\u0000\u00b8\u03a0\u0001\u0000\u0000"+ + "\u0000\u00ba\u03b5\u0001\u0000\u0000\u0000\u00bc\u03b7\u0001\u0000\u0000"+ + "\u0000\u00be\u03bf\u0001\u0000\u0000\u0000\u00c0\u03c1\u0001\u0000\u0000"+ + "\u0000\u00c2\u03c5\u0001\u0000\u0000\u0000\u00c4\u03c9\u0001\u0000\u0000"+ + "\u0000\u00c6\u03cd\u0001\u0000\u0000\u0000\u00c8\u03d2\u0001\u0000\u0000"+ + "\u0000\u00ca\u03d6\u0001\u0000\u0000\u0000\u00cc\u03da\u0001\u0000\u0000"+ + "\u0000\u00ce\u03de\u0001\u0000\u0000\u0000\u00d0\u03e2\u0001\u0000\u0000"+ + "\u0000\u00d2\u03e6\u0001\u0000\u0000\u0000\u00d4\u03ef\u0001\u0000\u0000"+ + "\u0000\u00d6\u03f3\u0001\u0000\u0000\u0000\u00d8\u03f7\u0001\u0000\u0000"+ + "\u0000\u00da\u03fb\u0001\u0000\u0000\u0000\u00dc\u03ff\u0001\u0000\u0000"+ + "\u0000\u00de\u0403\u0001\u0000\u0000\u0000\u00e0\u0408\u0001\u0000\u0000"+ + "\u0000\u00e2\u040c\u0001\u0000\u0000\u0000\u00e4\u0414\u0001\u0000\u0000"+ + "\u0000\u00e6\u0429\u0001\u0000\u0000\u0000\u00e8\u042d\u0001\u0000\u0000"+ + "\u0000\u00ea\u0431\u0001\u0000\u0000\u0000\u00ec\u0435\u0001\u0000\u0000"+ + "\u0000\u00ee\u0439\u0001\u0000\u0000\u0000\u00f0\u043d\u0001\u0000\u0000"+ + "\u0000\u00f2\u0442\u0001\u0000\u0000\u0000\u00f4\u0446\u0001\u0000\u0000"+ + "\u0000\u00f6\u044a\u0001\u0000\u0000\u0000\u00f8\u044e\u0001\u0000\u0000"+ + "\u0000\u00fa\u0451\u0001\u0000\u0000\u0000\u00fc\u0455\u0001\u0000\u0000"+ + "\u0000\u00fe\u0459\u0001\u0000\u0000\u0000\u0100\u045d\u0001\u0000\u0000"+ + "\u0000\u0102\u0461\u0001\u0000\u0000\u0000\u0104\u0466\u0001\u0000\u0000"+ + "\u0000\u0106\u046b\u0001\u0000\u0000\u0000\u0108\u0470\u0001\u0000\u0000"+ + "\u0000\u010a\u0477\u0001\u0000\u0000\u0000\u010c\u0480\u0001\u0000\u0000"+ + "\u0000\u010e\u0487\u0001\u0000\u0000\u0000\u0110\u048b\u0001\u0000\u0000"+ + "\u0000\u0112\u048f\u0001\u0000\u0000\u0000\u0114\u0493\u0001\u0000\u0000"+ + "\u0000\u0116\u0497\u0001\u0000\u0000\u0000\u0118\u049d\u0001\u0000\u0000"+ + "\u0000\u011a\u04a1\u0001\u0000\u0000\u0000\u011c\u04a5\u0001\u0000\u0000"+ + "\u0000\u011e\u04a9\u0001\u0000\u0000\u0000\u0120\u04ad\u0001\u0000\u0000"+ + 
"\u0000\u0122\u04b1\u0001\u0000\u0000\u0000\u0124\u04b5\u0001\u0000\u0000"+ + "\u0000\u0126\u04b9\u0001\u0000\u0000\u0000\u0128\u04bd\u0001\u0000\u0000"+ + "\u0000\u012a\u04c1\u0001\u0000\u0000\u0000\u012c\u04c6\u0001\u0000\u0000"+ + "\u0000\u012e\u04ca\u0001\u0000\u0000\u0000\u0130\u04ce\u0001\u0000\u0000"+ + "\u0000\u0132\u04d2\u0001\u0000\u0000\u0000\u0134\u04d7\u0001\u0000\u0000"+ + "\u0000\u0136\u04db\u0001\u0000\u0000\u0000\u0138\u04df\u0001\u0000\u0000"+ + "\u0000\u013a\u04e3\u0001\u0000\u0000\u0000\u013c\u04e7\u0001\u0000\u0000"+ + "\u0000\u013e\u04eb\u0001\u0000\u0000\u0000\u0140\u04f1\u0001\u0000\u0000"+ + "\u0000\u0142\u04f5\u0001\u0000\u0000\u0000\u0144\u04f9\u0001\u0000\u0000"+ + "\u0000\u0146\u04fd\u0001\u0000\u0000\u0000\u0148\u0501\u0001\u0000\u0000"+ + "\u0000\u014a\u0505\u0001\u0000\u0000\u0000\u014c\u0509\u0001\u0000\u0000"+ + "\u0000\u014e\u050e\u0001\u0000\u0000\u0000\u0150\u0512\u0001\u0000\u0000"+ + "\u0000\u0152\u0516\u0001\u0000\u0000\u0000\u0154\u051a\u0001\u0000\u0000"+ + "\u0000\u0156\u051e\u0001\u0000\u0000\u0000\u0158\u0522\u0001\u0000\u0000"+ + "\u0000\u015a\u0526\u0001\u0000\u0000\u0000\u015c\u052b\u0001\u0000\u0000"+ + "\u0000\u015e\u0530\u0001\u0000\u0000\u0000\u0160\u0534\u0001\u0000\u0000"+ + "\u0000\u0162\u0538\u0001\u0000\u0000\u0000\u0164\u053c\u0001\u0000\u0000"+ + "\u0000\u0166\u0541\u0001\u0000\u0000\u0000\u0168\u054b\u0001\u0000\u0000"+ + "\u0000\u016a\u054f\u0001\u0000\u0000\u0000\u016c\u0553\u0001\u0000\u0000"+ + "\u0000\u016e\u0557\u0001\u0000\u0000\u0000\u0170\u055c\u0001\u0000\u0000"+ + "\u0000\u0172\u0563\u0001\u0000\u0000\u0000\u0174\u0567\u0001\u0000\u0000"+ + "\u0000\u0176\u056b\u0001\u0000\u0000\u0000\u0178\u056f\u0001\u0000\u0000"+ + "\u0000\u017a\u0573\u0001\u0000\u0000\u0000\u017c\u0578\u0001\u0000\u0000"+ + "\u0000\u017e\u057e\u0001\u0000\u0000\u0000\u0180\u0584\u0001\u0000\u0000"+ + "\u0000\u0182\u0588\u0001\u0000\u0000\u0000\u0184\u058c\u0001\u0000\u0000"+ + "\u0000\u0186\u0590\u0001\u0000\u0000\u0000\u0188\u0596\u0001\u0000\u0000"+ + "\u0000\u018a\u059c\u0001\u0000\u0000\u0000\u018c\u05a0\u0001\u0000\u0000"+ + "\u0000\u018e\u05a4\u0001\u0000\u0000\u0000\u0190\u05a8\u0001\u0000\u0000"+ + "\u0000\u0192\u05ae\u0001\u0000\u0000\u0000\u0194\u05b4\u0001\u0000\u0000"+ + "\u0000\u0196\u05ba\u0001\u0000\u0000\u0000\u0198\u0199\u0005d\u0000\u0000"+ + "\u0199\u019a\u0005i\u0000\u0000\u019a\u019b\u0005s\u0000\u0000\u019b\u019c"+ + "\u0005s\u0000\u0000\u019c\u019d\u0005e\u0000\u0000\u019d\u019e\u0005c"+ + "\u0000\u0000\u019e\u019f\u0005t\u0000\u0000\u019f\u01a0\u0001\u0000\u0000"+ + "\u0000\u01a0\u01a1\u0006\u0000\u0000\u0000\u01a1\u0011\u0001\u0000\u0000"+ + "\u0000\u01a2\u01a3\u0005d\u0000\u0000\u01a3\u01a4\u0005r\u0000\u0000\u01a4"+ + "\u01a5\u0005o\u0000\u0000\u01a5\u01a6\u0005p\u0000\u0000\u01a6\u01a7\u0001"+ + "\u0000\u0000\u0000\u01a7\u01a8\u0006\u0001\u0001\u0000\u01a8\u0013\u0001"+ + "\u0000\u0000\u0000\u01a9\u01aa\u0005e\u0000\u0000\u01aa\u01ab\u0005n\u0000"+ + "\u0000\u01ab\u01ac\u0005r\u0000\u0000\u01ac\u01ad\u0005i\u0000\u0000\u01ad"+ + "\u01ae\u0005c\u0000\u0000\u01ae\u01af\u0005h\u0000\u0000\u01af\u01b0\u0001"+ + "\u0000\u0000\u0000\u01b0\u01b1\u0006\u0002\u0002\u0000\u01b1\u0015\u0001"+ + "\u0000\u0000\u0000\u01b2\u01b3\u0005e\u0000\u0000\u01b3\u01b4\u0005v\u0000"+ + "\u0000\u01b4\u01b5\u0005a\u0000\u0000\u01b5\u01b6\u0005l\u0000\u0000\u01b6"+ + "\u01b7\u0001\u0000\u0000\u0000\u01b7\u01b8\u0006\u0003\u0000\u0000\u01b8"+ + "\u0017\u0001\u0000\u0000\u0000\u01b9\u01ba\u0005e\u0000\u0000\u01ba\u01bb"+ + 
"\u0005x\u0000\u0000\u01bb\u01bc\u0005p\u0000\u0000\u01bc\u01bd\u0005l"+ + "\u0000\u0000\u01bd\u01be\u0005a\u0000\u0000\u01be\u01bf\u0005i\u0000\u0000"+ + "\u01bf\u01c0\u0005n\u0000\u0000\u01c0\u01c1\u0001\u0000\u0000\u0000\u01c1"+ + "\u01c2\u0006\u0004\u0003\u0000\u01c2\u0019\u0001\u0000\u0000\u0000\u01c3"+ + "\u01c4\u0005f\u0000\u0000\u01c4\u01c5\u0005r\u0000\u0000\u01c5\u01c6\u0005"+ + "o\u0000\u0000\u01c6\u01c7\u0005m\u0000\u0000\u01c7\u01c8\u0001\u0000\u0000"+ + "\u0000\u01c8\u01c9\u0006\u0005\u0004\u0000\u01c9\u001b\u0001\u0000\u0000"+ + "\u0000\u01ca\u01cb\u0005g\u0000\u0000\u01cb\u01cc\u0005r\u0000\u0000\u01cc"+ + "\u01cd\u0005o\u0000\u0000\u01cd\u01ce\u0005k\u0000\u0000\u01ce\u01cf\u0001"+ + "\u0000\u0000\u0000\u01cf\u01d0\u0006\u0006\u0000\u0000\u01d0\u001d\u0001"+ + "\u0000\u0000\u0000\u01d1\u01d2\u0005i\u0000\u0000\u01d2\u01d3\u0005n\u0000"+ + "\u0000\u01d3\u01d4\u0005l\u0000\u0000\u01d4\u01d5\u0005i\u0000\u0000\u01d5"+ + "\u01d6\u0005n\u0000\u0000\u01d6\u01d7\u0005e\u0000\u0000\u01d7\u01d8\u0005"+ + "s\u0000\u0000\u01d8\u01d9\u0005t\u0000\u0000\u01d9\u01da\u0005a\u0000"+ + "\u0000\u01da\u01db\u0005t\u0000\u0000\u01db\u01dc\u0005s\u0000\u0000\u01dc"+ + "\u01dd\u0001\u0000\u0000\u0000\u01dd\u01de\u0006\u0007\u0000\u0000\u01de"+ + "\u001f\u0001\u0000\u0000\u0000\u01df\u01e0\u0005k\u0000\u0000\u01e0\u01e1"+ + "\u0005e\u0000\u0000\u01e1\u01e2\u0005e\u0000\u0000\u01e2\u01e3\u0005p"+ + "\u0000\u0000\u01e3\u01e4\u0001\u0000\u0000\u0000\u01e4\u01e5\u0006\b\u0001"+ + "\u0000\u01e5!\u0001\u0000\u0000\u0000\u01e6\u01e7\u0005l\u0000\u0000\u01e7"+ + "\u01e8\u0005i\u0000\u0000\u01e8\u01e9\u0005m\u0000\u0000\u01e9\u01ea\u0005"+ + "i\u0000\u0000\u01ea\u01eb\u0005t\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000"+ + "\u0000\u01ec\u01ed\u0006\t\u0000\u0000\u01ed#\u0001\u0000\u0000\u0000"+ + "\u01ee\u01ef\u0005l\u0000\u0000\u01ef\u01f0\u0005o\u0000\u0000\u01f0\u01f1"+ + "\u0005o\u0000\u0000\u01f1\u01f2\u0005k\u0000\u0000\u01f2\u01f3\u0005u"+ + "\u0000\u0000\u01f3\u01f4\u0005p\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000"+ + "\u0000\u01f5\u01f6\u0006\n\u0005\u0000\u01f6%\u0001\u0000\u0000\u0000"+ + "\u01f7\u01f8\u0005m\u0000\u0000\u01f8\u01f9\u0005e\u0000\u0000\u01f9\u01fa"+ + "\u0005t\u0000\u0000\u01fa\u01fb\u0005a\u0000\u0000\u01fb\u01fc\u0001\u0000"+ + "\u0000\u0000\u01fc\u01fd\u0006\u000b\u0006\u0000\u01fd\'\u0001\u0000\u0000"+ + "\u0000\u01fe\u01ff\u0005m\u0000\u0000\u01ff\u0200\u0005e\u0000\u0000\u0200"+ + "\u0201\u0005t\u0000\u0000\u0201\u0202\u0005r\u0000\u0000\u0202\u0203\u0005"+ + "i\u0000\u0000\u0203\u0204\u0005c\u0000\u0000\u0204\u0205\u0005s\u0000"+ + "\u0000\u0205\u0206\u0001\u0000\u0000\u0000\u0206\u0207\u0006\f\u0007\u0000"+ + "\u0207)\u0001\u0000\u0000\u0000\u0208\u0209\u0005m\u0000\u0000\u0209\u020a"+ + "\u0005v\u0000\u0000\u020a\u020b\u0005_\u0000\u0000\u020b\u020c\u0005e"+ + "\u0000\u0000\u020c\u020d\u0005x\u0000\u0000\u020d\u020e\u0005p\u0000\u0000"+ + "\u020e\u020f\u0005a\u0000\u0000\u020f\u0210\u0005n\u0000\u0000\u0210\u0211"+ + "\u0005d\u0000\u0000\u0211\u0212\u0001\u0000\u0000\u0000\u0212\u0213\u0006"+ + "\r\b\u0000\u0213+\u0001\u0000\u0000\u0000\u0214\u0215\u0005r\u0000\u0000"+ "\u0215\u0216\u0005e\u0000\u0000\u0216\u0217\u0005n\u0000\u0000\u0217\u0218"+ "\u0005a\u0000\u0000\u0218\u0219\u0005m\u0000\u0000\u0219\u021a\u0005e"+ "\u0000\u0000\u021a\u021b\u0001\u0000\u0000\u0000\u021b\u021c\u0006\u000e"+ @@ -861,294 +861,296 @@ public EsqlBaseLexer(CharStream input) { "\u0001\u0000\u0000\u0000\u0382\u0383\u0005*\u0000\u0000\u0383\u00af\u0001"+ 
"\u0000\u0000\u0000\u0384\u0385\u0005/\u0000\u0000\u0385\u00b1\u0001\u0000"+ "\u0000\u0000\u0386\u0387\u0005%\u0000\u0000\u0387\u00b3\u0001\u0000\u0000"+ - "\u0000\u0388\u0389\u0003\u0094B\u0000\u0389\u038d\u0003T\"\u0000\u038a"+ - "\u038c\u0003d*\u0000\u038b\u038a\u0001\u0000\u0000\u0000\u038c\u038f\u0001"+ - "\u0000\u0000\u0000\u038d\u038b\u0001\u0000\u0000\u0000\u038d\u038e\u0001"+ - "\u0000\u0000\u0000\u038e\u0397\u0001\u0000\u0000\u0000\u038f\u038d\u0001"+ - "\u0000\u0000\u0000\u0390\u0392\u0003\u0094B\u0000\u0391\u0393\u0003R!"+ - "\u0000\u0392\u0391\u0001\u0000\u0000\u0000\u0393\u0394\u0001\u0000\u0000"+ - "\u0000\u0394\u0392\u0001\u0000\u0000\u0000\u0394\u0395\u0001\u0000\u0000"+ - "\u0000\u0395\u0397\u0001\u0000\u0000\u0000\u0396\u0388\u0001\u0000\u0000"+ - "\u0000\u0396\u0390\u0001\u0000\u0000\u0000\u0397\u00b5\u0001\u0000\u0000"+ - "\u0000\u0398\u0399\u0005[\u0000\u0000\u0399\u039a\u0001\u0000\u0000\u0000"+ - "\u039a\u039b\u0006S\u0000\u0000\u039b\u039c\u0006S\u0000\u0000\u039c\u00b7"+ - "\u0001\u0000\u0000\u0000\u039d\u039e\u0005]\u0000\u0000\u039e\u039f\u0001"+ - "\u0000\u0000\u0000\u039f\u03a0\u0006T\u000f\u0000\u03a0\u03a1\u0006T\u000f"+ - "\u0000\u03a1\u00b9\u0001\u0000\u0000\u0000\u03a2\u03a6\u0003T\"\u0000"+ - "\u03a3\u03a5\u0003d*\u0000\u03a4\u03a3\u0001\u0000\u0000\u0000\u03a5\u03a8"+ - "\u0001\u0000\u0000\u0000\u03a6\u03a4\u0001\u0000\u0000\u0000\u03a6\u03a7"+ - "\u0001\u0000\u0000\u0000\u03a7\u03b3\u0001\u0000\u0000\u0000\u03a8\u03a6"+ - "\u0001\u0000\u0000\u0000\u03a9\u03ac\u0003b)\u0000\u03aa\u03ac\u0003\\"+ - "&\u0000\u03ab\u03a9\u0001\u0000\u0000\u0000\u03ab\u03aa\u0001\u0000\u0000"+ - "\u0000\u03ac\u03ae\u0001\u0000\u0000\u0000\u03ad\u03af\u0003d*\u0000\u03ae"+ - "\u03ad\u0001\u0000\u0000\u0000\u03af\u03b0\u0001\u0000\u0000\u0000\u03b0"+ - "\u03ae\u0001\u0000\u0000\u0000\u03b0\u03b1\u0001\u0000\u0000\u0000\u03b1"+ - "\u03b3\u0001\u0000\u0000\u0000\u03b2\u03a2\u0001\u0000\u0000\u0000\u03b2"+ - "\u03ab\u0001\u0000\u0000\u0000\u03b3\u00bb\u0001\u0000\u0000\u0000\u03b4"+ - "\u03b6\u0003^\'\u0000\u03b5\u03b7\u0003`(\u0000\u03b6\u03b5\u0001\u0000"+ - "\u0000\u0000\u03b7\u03b8\u0001\u0000\u0000\u0000\u03b8\u03b6\u0001\u0000"+ - "\u0000\u0000\u03b8\u03b9\u0001\u0000\u0000\u0000\u03b9\u03ba\u0001\u0000"+ - "\u0000\u0000\u03ba\u03bb\u0003^\'\u0000\u03bb\u00bd\u0001\u0000\u0000"+ - "\u0000\u03bc\u03bd\u0003\u00bcV\u0000\u03bd\u00bf\u0001\u0000\u0000\u0000"+ - "\u03be\u03bf\u0003<\u0016\u0000\u03bf\u03c0\u0001\u0000\u0000\u0000\u03c0"+ - "\u03c1\u0006X\u000b\u0000\u03c1\u00c1\u0001\u0000\u0000\u0000\u03c2\u03c3"+ - "\u0003>\u0017\u0000\u03c3\u03c4\u0001\u0000\u0000\u0000\u03c4\u03c5\u0006"+ - "Y\u000b\u0000\u03c5\u00c3\u0001\u0000\u0000\u0000\u03c6\u03c7\u0003@\u0018"+ - "\u0000\u03c7\u03c8\u0001\u0000\u0000\u0000\u03c8\u03c9\u0006Z\u000b\u0000"+ - "\u03c9\u00c5\u0001\u0000\u0000\u0000\u03ca\u03cb\u0003P \u0000\u03cb\u03cc"+ - "\u0001\u0000\u0000\u0000\u03cc\u03cd\u0006[\u000e\u0000\u03cd\u03ce\u0006"+ - "[\u000f\u0000\u03ce\u00c7\u0001\u0000\u0000\u0000\u03cf\u03d0\u0003\u00b6"+ - "S\u0000\u03d0\u03d1\u0001\u0000\u0000\u0000\u03d1\u03d2\u0006\\\f\u0000"+ - "\u03d2\u00c9\u0001\u0000\u0000\u0000\u03d3\u03d4\u0003\u00b8T\u0000\u03d4"+ - "\u03d5\u0001\u0000\u0000\u0000\u03d5\u03d6\u0006]\u0010\u0000\u03d6\u00cb"+ - "\u0001\u0000\u0000\u0000\u03d7\u03d8\u0003\u0170\u00b0\u0000\u03d8\u03d9"+ - "\u0001\u0000\u0000\u0000\u03d9\u03da\u0006^\u0011\u0000\u03da\u00cd\u0001"+ - "\u0000\u0000\u0000\u03db\u03dc\u0003v3\u0000\u03dc\u03dd\u0001\u0000\u0000"+ - 
"\u0000\u03dd\u03de\u0006_\u0012\u0000\u03de\u00cf\u0001\u0000\u0000\u0000"+ - "\u03df\u03e0\u0003r1\u0000\u03e0\u03e1\u0001\u0000\u0000\u0000\u03e1\u03e2"+ - "\u0006`\u0013\u0000\u03e2\u00d1\u0001\u0000\u0000\u0000\u03e3\u03e4\u0005"+ - "m\u0000\u0000\u03e4\u03e5\u0005e\u0000\u0000\u03e5\u03e6\u0005t\u0000"+ - "\u0000\u03e6\u03e7\u0005a\u0000\u0000\u03e7\u03e8\u0005d\u0000\u0000\u03e8"+ - "\u03e9\u0005a\u0000\u0000\u03e9\u03ea\u0005t\u0000\u0000\u03ea\u03eb\u0005"+ - "a\u0000\u0000\u03eb\u00d3\u0001\u0000\u0000\u0000\u03ec\u03ed\u0003D\u001a"+ - "\u0000\u03ed\u03ee\u0001\u0000\u0000\u0000\u03ee\u03ef\u0006b\u0014\u0000"+ - "\u03ef\u00d5\u0001\u0000\u0000\u0000\u03f0\u03f1\u0003f+\u0000\u03f1\u03f2"+ - "\u0001\u0000\u0000\u0000\u03f2\u03f3\u0006c\u0015\u0000\u03f3\u00d7\u0001"+ - "\u0000\u0000\u0000\u03f4\u03f5\u0003<\u0016\u0000\u03f5\u03f6\u0001\u0000"+ - "\u0000\u0000\u03f6\u03f7\u0006d\u000b\u0000\u03f7\u00d9\u0001\u0000\u0000"+ - "\u0000\u03f8\u03f9\u0003>\u0017\u0000\u03f9\u03fa\u0001\u0000\u0000\u0000"+ - "\u03fa\u03fb\u0006e\u000b\u0000\u03fb\u00db\u0001\u0000\u0000\u0000\u03fc"+ - "\u03fd\u0003@\u0018\u0000\u03fd\u03fe\u0001\u0000\u0000\u0000\u03fe\u03ff"+ - "\u0006f\u000b\u0000\u03ff\u00dd\u0001\u0000\u0000\u0000\u0400\u0401\u0003"+ - "P \u0000\u0401\u0402\u0001\u0000\u0000\u0000\u0402\u0403\u0006g\u000e"+ - "\u0000\u0403\u0404\u0006g\u000f\u0000\u0404\u00df\u0001\u0000\u0000\u0000"+ - "\u0405\u0406\u0003z5\u0000\u0406\u0407\u0001\u0000\u0000\u0000\u0407\u0408"+ - "\u0006h\u0016\u0000\u0408\u00e1\u0001\u0000\u0000\u0000\u0409\u040a\u0003"+ - "v3\u0000\u040a\u040b\u0001\u0000\u0000\u0000\u040b\u040c\u0006i\u0012"+ - "\u0000\u040c\u00e3\u0001\u0000\u0000\u0000\u040d\u0412\u0003T\"\u0000"+ - "\u040e\u0412\u0003R!\u0000\u040f\u0412\u0003b)\u0000\u0410\u0412\u0003"+ - "\u00aeO\u0000\u0411\u040d\u0001\u0000\u0000\u0000\u0411\u040e\u0001\u0000"+ - "\u0000\u0000\u0411\u040f\u0001\u0000\u0000\u0000\u0411\u0410\u0001\u0000"+ - "\u0000\u0000\u0412\u00e5\u0001\u0000\u0000\u0000\u0413\u0416\u0003T\""+ - "\u0000\u0414\u0416\u0003\u00aeO\u0000\u0415\u0413\u0001\u0000\u0000\u0000"+ - "\u0415\u0414\u0001\u0000\u0000\u0000\u0416\u041a\u0001\u0000\u0000\u0000"+ - "\u0417\u0419\u0003\u00e4j\u0000\u0418\u0417\u0001\u0000\u0000\u0000\u0419"+ - "\u041c\u0001\u0000\u0000\u0000\u041a\u0418\u0001\u0000\u0000\u0000\u041a"+ - "\u041b\u0001\u0000\u0000\u0000\u041b\u0427\u0001\u0000\u0000\u0000\u041c"+ - "\u041a\u0001\u0000\u0000\u0000\u041d\u0420\u0003b)\u0000\u041e\u0420\u0003"+ - "\\&\u0000\u041f\u041d\u0001\u0000\u0000\u0000\u041f\u041e\u0001\u0000"+ - "\u0000\u0000\u0420\u0422\u0001\u0000\u0000\u0000\u0421\u0423\u0003\u00e4"+ - "j\u0000\u0422\u0421\u0001\u0000\u0000\u0000\u0423\u0424\u0001\u0000\u0000"+ - "\u0000\u0424\u0422\u0001\u0000\u0000\u0000\u0424\u0425\u0001\u0000\u0000"+ - "\u0000\u0425\u0427\u0001\u0000\u0000\u0000\u0426\u0415\u0001\u0000\u0000"+ - "\u0000\u0426\u041f\u0001\u0000\u0000\u0000\u0427\u00e7\u0001\u0000\u0000"+ - "\u0000\u0428\u042b\u0003\u00e6k\u0000\u0429\u042b\u0003\u00bcV\u0000\u042a"+ - "\u0428\u0001\u0000\u0000\u0000\u042a\u0429\u0001\u0000\u0000\u0000\u042b"+ - "\u042c\u0001\u0000\u0000\u0000\u042c\u042a\u0001\u0000\u0000\u0000\u042c"+ - "\u042d\u0001\u0000\u0000\u0000\u042d\u00e9\u0001\u0000\u0000\u0000\u042e"+ - "\u042f\u0003<\u0016\u0000\u042f\u0430\u0001\u0000\u0000\u0000\u0430\u0431"+ - "\u0006m\u000b\u0000\u0431\u00eb\u0001\u0000\u0000\u0000\u0432\u0433\u0003"+ - ">\u0017\u0000\u0433\u0434\u0001\u0000\u0000\u0000\u0434\u0435\u0006n\u000b"+ - 
"\u0000\u0435\u00ed\u0001\u0000\u0000\u0000\u0436\u0437\u0003@\u0018\u0000"+ - "\u0437\u0438\u0001\u0000\u0000\u0000\u0438\u0439\u0006o\u000b\u0000\u0439"+ - "\u00ef\u0001\u0000\u0000\u0000\u043a\u043b\u0003P \u0000\u043b\u043c\u0001"+ - "\u0000\u0000\u0000\u043c\u043d\u0006p\u000e\u0000\u043d\u043e\u0006p\u000f"+ - "\u0000\u043e\u00f1\u0001\u0000\u0000\u0000\u043f\u0440\u0003r1\u0000\u0440"+ - "\u0441\u0001\u0000\u0000\u0000\u0441\u0442\u0006q\u0013\u0000\u0442\u00f3"+ - "\u0001\u0000\u0000\u0000\u0443\u0444\u0003v3\u0000\u0444\u0445\u0001\u0000"+ - "\u0000\u0000\u0445\u0446\u0006r\u0012\u0000\u0446\u00f5\u0001\u0000\u0000"+ - "\u0000\u0447\u0448\u0003z5\u0000\u0448\u0449\u0001\u0000\u0000\u0000\u0449"+ - "\u044a\u0006s\u0016\u0000\u044a\u00f7\u0001\u0000\u0000\u0000\u044b\u044c"+ - "\u0005a\u0000\u0000\u044c\u044d\u0005s\u0000\u0000\u044d\u00f9\u0001\u0000"+ - "\u0000\u0000\u044e\u044f\u0003\u00e8l\u0000\u044f\u0450\u0001\u0000\u0000"+ - "\u0000\u0450\u0451\u0006u\u0017\u0000\u0451\u00fb\u0001\u0000\u0000\u0000"+ - "\u0452\u0453\u0003<\u0016\u0000\u0453\u0454\u0001\u0000\u0000\u0000\u0454"+ - "\u0455\u0006v\u000b\u0000\u0455\u00fd\u0001\u0000\u0000\u0000\u0456\u0457"+ - "\u0003>\u0017\u0000\u0457\u0458\u0001\u0000\u0000\u0000\u0458\u0459\u0006"+ - "w\u000b\u0000\u0459\u00ff\u0001\u0000\u0000\u0000\u045a\u045b\u0003@\u0018"+ - "\u0000\u045b\u045c\u0001\u0000\u0000\u0000\u045c\u045d\u0006x\u000b\u0000"+ - "\u045d\u0101\u0001\u0000\u0000\u0000\u045e\u045f\u0003P \u0000\u045f\u0460"+ - "\u0001\u0000\u0000\u0000\u0460\u0461\u0006y\u000e\u0000\u0461\u0462\u0006"+ - "y\u000f\u0000\u0462\u0103\u0001\u0000\u0000\u0000\u0463\u0464\u0003\u00b6"+ - "S\u0000\u0464\u0465\u0001\u0000\u0000\u0000\u0465\u0466\u0006z\f\u0000"+ - "\u0466\u0467\u0006z\u0018\u0000\u0467\u0105\u0001\u0000\u0000\u0000\u0468"+ - "\u0469\u0005o\u0000\u0000\u0469\u046a\u0005n\u0000\u0000\u046a\u046b\u0001"+ - "\u0000\u0000\u0000\u046b\u046c\u0006{\u0019\u0000\u046c\u0107\u0001\u0000"+ - "\u0000\u0000\u046d\u046e\u0005w\u0000\u0000\u046e\u046f\u0005i\u0000\u0000"+ - "\u046f\u0470\u0005t\u0000\u0000\u0470\u0471\u0005h\u0000\u0000\u0471\u0472"+ - "\u0001\u0000\u0000\u0000\u0472\u0473\u0006|\u0019\u0000\u0473\u0109\u0001"+ - "\u0000\u0000\u0000\u0474\u0475\b\f\u0000\u0000\u0475\u010b\u0001\u0000"+ - "\u0000\u0000\u0476\u0478\u0003\u010a}\u0000\u0477\u0476\u0001\u0000\u0000"+ - "\u0000\u0478\u0479\u0001\u0000\u0000\u0000\u0479\u0477\u0001\u0000\u0000"+ - "\u0000\u0479\u047a\u0001\u0000\u0000\u0000\u047a\u047b\u0001\u0000\u0000"+ - "\u0000\u047b\u047c\u0003\u0170\u00b0\u0000\u047c\u047e\u0001\u0000\u0000"+ - "\u0000\u047d\u0477\u0001\u0000\u0000\u0000\u047d\u047e\u0001\u0000\u0000"+ - "\u0000\u047e\u0480\u0001\u0000\u0000\u0000\u047f\u0481\u0003\u010a}\u0000"+ - "\u0480\u047f\u0001\u0000\u0000\u0000\u0481\u0482\u0001\u0000\u0000\u0000"+ - "\u0482\u0480\u0001\u0000\u0000\u0000\u0482\u0483\u0001\u0000\u0000\u0000"+ - "\u0483\u010d\u0001\u0000\u0000\u0000\u0484\u0485\u0003\u010c~\u0000\u0485"+ - "\u0486\u0001\u0000\u0000\u0000\u0486\u0487\u0006\u007f\u001a\u0000\u0487"+ - "\u010f\u0001\u0000\u0000\u0000\u0488\u0489\u0003<\u0016\u0000\u0489\u048a"+ - "\u0001\u0000\u0000\u0000\u048a\u048b\u0006\u0080\u000b\u0000\u048b\u0111"+ - "\u0001\u0000\u0000\u0000\u048c\u048d\u0003>\u0017\u0000\u048d\u048e\u0001"+ - "\u0000\u0000\u0000\u048e\u048f\u0006\u0081\u000b\u0000\u048f\u0113\u0001"+ - "\u0000\u0000\u0000\u0490\u0491\u0003@\u0018\u0000\u0491\u0492\u0001\u0000"+ - "\u0000\u0000\u0492\u0493\u0006\u0082\u000b\u0000\u0493\u0115\u0001\u0000"+ - 
"\u0000\u0000\u0494\u0495\u0003P \u0000\u0495\u0496\u0001\u0000\u0000\u0000"+ - "\u0496\u0497\u0006\u0083\u000e\u0000\u0497\u0498\u0006\u0083\u000f\u0000"+ - "\u0498\u0499\u0006\u0083\u000f\u0000\u0499\u0117\u0001\u0000\u0000\u0000"+ - "\u049a\u049b\u0003r1\u0000\u049b\u049c\u0001\u0000\u0000\u0000\u049c\u049d"+ - "\u0006\u0084\u0013\u0000\u049d\u0119\u0001\u0000\u0000\u0000\u049e\u049f"+ - "\u0003v3\u0000\u049f\u04a0\u0001\u0000\u0000\u0000\u04a0\u04a1\u0006\u0085"+ - "\u0012\u0000\u04a1\u011b\u0001\u0000\u0000\u0000\u04a2\u04a3\u0003z5\u0000"+ - "\u04a3\u04a4\u0001\u0000\u0000\u0000\u04a4\u04a5\u0006\u0086\u0016\u0000"+ - "\u04a5\u011d\u0001\u0000\u0000\u0000\u04a6\u04a7\u0003\u0108|\u0000\u04a7"+ - "\u04a8\u0001\u0000\u0000\u0000\u04a8\u04a9\u0006\u0087\u001b\u0000\u04a9"+ - "\u011f\u0001\u0000\u0000\u0000\u04aa\u04ab\u0003\u00e8l\u0000\u04ab\u04ac"+ - "\u0001\u0000\u0000\u0000\u04ac\u04ad\u0006\u0088\u0017\u0000\u04ad\u0121"+ - "\u0001\u0000\u0000\u0000\u04ae\u04af\u0003\u00beW\u0000\u04af\u04b0\u0001"+ - "\u0000\u0000\u0000\u04b0\u04b1\u0006\u0089\u001c\u0000\u04b1\u0123\u0001"+ - "\u0000\u0000\u0000\u04b2\u04b3\u0003<\u0016\u0000\u04b3\u04b4\u0001\u0000"+ - "\u0000\u0000\u04b4\u04b5\u0006\u008a\u000b\u0000\u04b5\u0125\u0001\u0000"+ - "\u0000\u0000\u04b6\u04b7\u0003>\u0017\u0000\u04b7\u04b8\u0001\u0000\u0000"+ - "\u0000\u04b8\u04b9\u0006\u008b\u000b\u0000\u04b9\u0127\u0001\u0000\u0000"+ - "\u0000\u04ba\u04bb\u0003@\u0018\u0000\u04bb\u04bc\u0001\u0000\u0000\u0000"+ - "\u04bc\u04bd\u0006\u008c\u000b\u0000\u04bd\u0129\u0001\u0000\u0000\u0000"+ - "\u04be\u04bf\u0003P \u0000\u04bf\u04c0\u0001\u0000\u0000\u0000\u04c0\u04c1"+ - "\u0006\u008d\u000e\u0000\u04c1\u04c2\u0006\u008d\u000f\u0000\u04c2\u012b"+ - "\u0001\u0000\u0000\u0000\u04c3\u04c4\u0003\u0170\u00b0\u0000\u04c4\u04c5"+ - "\u0001\u0000\u0000\u0000\u04c5\u04c6\u0006\u008e\u0011\u0000\u04c6\u012d"+ - "\u0001\u0000\u0000\u0000\u04c7\u04c8\u0003v3\u0000\u04c8\u04c9\u0001\u0000"+ - "\u0000\u0000\u04c9\u04ca\u0006\u008f\u0012\u0000\u04ca\u012f\u0001\u0000"+ - "\u0000\u0000\u04cb\u04cc\u0003z5\u0000\u04cc\u04cd\u0001\u0000\u0000\u0000"+ - "\u04cd\u04ce\u0006\u0090\u0016\u0000\u04ce\u0131\u0001\u0000\u0000\u0000"+ - "\u04cf\u04d0\u0003\u0106{\u0000\u04d0\u04d1\u0001\u0000\u0000\u0000\u04d1"+ - "\u04d2\u0006\u0091\u001d\u0000\u04d2\u04d3\u0006\u0091\u001e\u0000\u04d3"+ - "\u0133\u0001\u0000\u0000\u0000\u04d4\u04d5\u0003D\u001a\u0000\u04d5\u04d6"+ - "\u0001\u0000\u0000\u0000\u04d6\u04d7\u0006\u0092\u0014\u0000\u04d7\u0135"+ - "\u0001\u0000\u0000\u0000\u04d8\u04d9\u0003f+\u0000\u04d9\u04da\u0001\u0000"+ - "\u0000\u0000\u04da\u04db\u0006\u0093\u0015\u0000\u04db\u0137\u0001\u0000"+ - "\u0000\u0000\u04dc\u04dd\u0003<\u0016\u0000\u04dd\u04de\u0001\u0000\u0000"+ - "\u0000\u04de\u04df\u0006\u0094\u000b\u0000\u04df\u0139\u0001\u0000\u0000"+ - "\u0000\u04e0\u04e1\u0003>\u0017\u0000\u04e1\u04e2\u0001\u0000\u0000\u0000"+ - "\u04e2\u04e3\u0006\u0095\u000b\u0000\u04e3\u013b\u0001\u0000\u0000\u0000"+ - "\u04e4\u04e5\u0003@\u0018\u0000\u04e5\u04e6\u0001\u0000\u0000\u0000\u04e6"+ - "\u04e7\u0006\u0096\u000b\u0000\u04e7\u013d\u0001\u0000\u0000\u0000\u04e8"+ - "\u04e9\u0003P \u0000\u04e9\u04ea\u0001\u0000\u0000\u0000\u04ea\u04eb\u0006"+ - "\u0097\u000e\u0000\u04eb\u04ec\u0006\u0097\u000f\u0000\u04ec\u04ed\u0006"+ - "\u0097\u000f\u0000\u04ed\u013f\u0001\u0000\u0000\u0000\u04ee\u04ef\u0003"+ - "v3\u0000\u04ef\u04f0\u0001\u0000\u0000\u0000\u04f0\u04f1\u0006\u0098\u0012"+ - "\u0000\u04f1\u0141\u0001\u0000\u0000\u0000\u04f2\u04f3\u0003z5\u0000\u04f3"+ - 
"\u04f4\u0001\u0000\u0000\u0000\u04f4\u04f5\u0006\u0099\u0016\u0000\u04f5"+ - "\u0143\u0001\u0000\u0000\u0000\u04f6\u04f7\u0003\u00e8l\u0000\u04f7\u04f8"+ - "\u0001\u0000\u0000\u0000\u04f8\u04f9\u0006\u009a\u0017\u0000\u04f9\u0145"+ - "\u0001\u0000\u0000\u0000\u04fa\u04fb\u0003<\u0016\u0000\u04fb\u04fc\u0001"+ - "\u0000\u0000\u0000\u04fc\u04fd\u0006\u009b\u000b\u0000\u04fd\u0147\u0001"+ - "\u0000\u0000\u0000\u04fe\u04ff\u0003>\u0017\u0000\u04ff\u0500\u0001\u0000"+ - "\u0000\u0000\u0500\u0501\u0006\u009c\u000b\u0000\u0501\u0149\u0001\u0000"+ - "\u0000\u0000\u0502\u0503\u0003@\u0018\u0000\u0503\u0504\u0001\u0000\u0000"+ - "\u0000\u0504\u0505\u0006\u009d\u000b\u0000\u0505\u014b\u0001\u0000\u0000"+ - "\u0000\u0506\u0507\u0003P \u0000\u0507\u0508\u0001\u0000\u0000\u0000\u0508"+ - "\u0509\u0006\u009e\u000e\u0000\u0509\u050a\u0006\u009e\u000f\u0000\u050a"+ - "\u014d\u0001\u0000\u0000\u0000\u050b\u050c\u0003z5\u0000\u050c\u050d\u0001"+ - "\u0000\u0000\u0000\u050d\u050e\u0006\u009f\u0016\u0000\u050e\u014f\u0001"+ - "\u0000\u0000\u0000\u050f\u0510\u0003\u00beW\u0000\u0510\u0511\u0001\u0000"+ - "\u0000\u0000\u0511\u0512\u0006\u00a0\u001c\u0000\u0512\u0151\u0001\u0000"+ - "\u0000\u0000\u0513\u0514\u0003\u00baU\u0000\u0514\u0515\u0001\u0000\u0000"+ - "\u0000\u0515\u0516\u0006\u00a1\u001f\u0000\u0516\u0153\u0001\u0000\u0000"+ - "\u0000\u0517\u0518\u0003<\u0016\u0000\u0518\u0519\u0001\u0000\u0000\u0000"+ - "\u0519\u051a\u0006\u00a2\u000b\u0000\u051a\u0155\u0001\u0000\u0000\u0000"+ - "\u051b\u051c\u0003>\u0017\u0000\u051c\u051d\u0001\u0000\u0000\u0000\u051d"+ - "\u051e\u0006\u00a3\u000b\u0000\u051e\u0157\u0001\u0000\u0000\u0000\u051f"+ - "\u0520\u0003@\u0018\u0000\u0520\u0521\u0001\u0000\u0000\u0000\u0521\u0522"+ - "\u0006\u00a4\u000b\u0000\u0522\u0159\u0001\u0000\u0000\u0000\u0523\u0524"+ - "\u0003P \u0000\u0524\u0525\u0001\u0000\u0000\u0000\u0525\u0526\u0006\u00a5"+ - "\u000e\u0000\u0526\u0527\u0006\u00a5\u000f\u0000\u0527\u015b\u0001\u0000"+ - "\u0000\u0000\u0528\u0529\u0005i\u0000\u0000\u0529\u052a\u0005n\u0000\u0000"+ - "\u052a\u052b\u0005f\u0000\u0000\u052b\u052c\u0005o\u0000\u0000\u052c\u015d"+ - "\u0001\u0000\u0000\u0000\u052d\u052e\u0003<\u0016\u0000\u052e\u052f\u0001"+ - "\u0000\u0000\u0000\u052f\u0530\u0006\u00a7\u000b\u0000\u0530\u015f\u0001"+ - "\u0000\u0000\u0000\u0531\u0532\u0003>\u0017\u0000\u0532\u0533\u0001\u0000"+ - "\u0000\u0000\u0533\u0534\u0006\u00a8\u000b\u0000\u0534\u0161\u0001\u0000"+ - "\u0000\u0000\u0535\u0536\u0003@\u0018\u0000\u0536\u0537\u0001\u0000\u0000"+ - "\u0000\u0537\u0538\u0006\u00a9\u000b\u0000\u0538\u0163\u0001\u0000\u0000"+ - "\u0000\u0539\u053a\u0003P \u0000\u053a\u053b\u0001\u0000\u0000\u0000\u053b"+ - "\u053c\u0006\u00aa\u000e\u0000\u053c\u053d\u0006\u00aa\u000f\u0000\u053d"+ - "\u0165\u0001\u0000\u0000\u0000\u053e\u053f\u0005f\u0000\u0000\u053f\u0540"+ - "\u0005u\u0000\u0000\u0540\u0541\u0005n\u0000\u0000\u0541\u0542\u0005c"+ - "\u0000\u0000\u0542\u0543\u0005t\u0000\u0000\u0543\u0544\u0005i\u0000\u0000"+ - "\u0544\u0545\u0005o\u0000\u0000\u0545\u0546\u0005n\u0000\u0000\u0546\u0547"+ - "\u0005s\u0000\u0000\u0547\u0167\u0001\u0000\u0000\u0000\u0548\u0549\u0003"+ - "<\u0016\u0000\u0549\u054a\u0001\u0000\u0000\u0000\u054a\u054b\u0006\u00ac"+ - "\u000b\u0000\u054b\u0169\u0001\u0000\u0000\u0000\u054c\u054d\u0003>\u0017"+ - "\u0000\u054d\u054e\u0001\u0000\u0000\u0000\u054e\u054f\u0006\u00ad\u000b"+ - "\u0000\u054f\u016b\u0001\u0000\u0000\u0000\u0550\u0551\u0003@\u0018\u0000"+ - "\u0551\u0552\u0001\u0000\u0000\u0000\u0552\u0553\u0006\u00ae\u000b\u0000"+ - 
"\u0553\u016d\u0001\u0000\u0000\u0000\u0554\u0555\u0003\u00b8T\u0000\u0555"+ - "\u0556\u0001\u0000\u0000\u0000\u0556\u0557\u0006\u00af\u0010\u0000\u0557"+ - "\u0558\u0006\u00af\u000f\u0000\u0558\u016f\u0001\u0000\u0000\u0000\u0559"+ - "\u055a\u0005:\u0000\u0000\u055a\u0171\u0001\u0000\u0000\u0000\u055b\u0561"+ - "\u0003\\&\u0000\u055c\u0561\u0003R!\u0000\u055d\u0561\u0003z5\u0000\u055e"+ - "\u0561\u0003T\"\u0000\u055f\u0561\u0003b)\u0000\u0560\u055b\u0001\u0000"+ - "\u0000\u0000\u0560\u055c\u0001\u0000\u0000\u0000\u0560\u055d\u0001\u0000"+ - "\u0000\u0000\u0560\u055e\u0001\u0000\u0000\u0000\u0560\u055f\u0001\u0000"+ - "\u0000\u0000\u0561\u0562\u0001\u0000\u0000\u0000\u0562\u0560\u0001\u0000"+ - "\u0000\u0000\u0562\u0563\u0001\u0000\u0000\u0000\u0563\u0173\u0001\u0000"+ - "\u0000\u0000\u0564\u0565\u0003<\u0016\u0000\u0565\u0566\u0001\u0000\u0000"+ - "\u0000\u0566\u0567\u0006\u00b2\u000b\u0000\u0567\u0175\u0001\u0000\u0000"+ - "\u0000\u0568\u0569\u0003>\u0017\u0000\u0569\u056a\u0001\u0000\u0000\u0000"+ - "\u056a\u056b\u0006\u00b3\u000b\u0000\u056b\u0177\u0001\u0000\u0000\u0000"+ - "\u056c\u056d\u0003@\u0018\u0000\u056d\u056e\u0001\u0000\u0000\u0000\u056e"+ - "\u056f\u0006\u00b4\u000b\u0000\u056f\u0179\u0001\u0000\u0000\u0000\u0570"+ - "\u0571\u0003P \u0000\u0571\u0572\u0001\u0000\u0000\u0000\u0572\u0573\u0006"+ - "\u00b5\u000e\u0000\u0573\u0574\u0006\u00b5\u000f\u0000\u0574\u017b\u0001"+ - "\u0000\u0000\u0000\u0575\u0576\u0003D\u001a\u0000\u0576\u0577\u0001\u0000"+ - "\u0000\u0000\u0577\u0578\u0006\u00b6\u0014\u0000\u0578\u0579\u0006\u00b6"+ - "\u000f\u0000\u0579\u057a\u0006\u00b6 \u0000\u057a\u017d\u0001\u0000\u0000"+ - "\u0000\u057b\u057c\u0003f+\u0000\u057c\u057d\u0001\u0000\u0000\u0000\u057d"+ - "\u057e\u0006\u00b7\u0015\u0000\u057e\u057f\u0006\u00b7\u000f\u0000\u057f"+ - "\u0580\u0006\u00b7 \u0000\u0580\u017f\u0001\u0000\u0000\u0000\u0581\u0582"+ - "\u0003<\u0016\u0000\u0582\u0583\u0001\u0000\u0000\u0000\u0583\u0584\u0006"+ - "\u00b8\u000b\u0000\u0584\u0181\u0001\u0000\u0000\u0000\u0585\u0586\u0003"+ - ">\u0017\u0000\u0586\u0587\u0001\u0000\u0000\u0000\u0587\u0588\u0006\u00b9"+ - "\u000b\u0000\u0588\u0183\u0001\u0000\u0000\u0000\u0589\u058a\u0003@\u0018"+ - "\u0000\u058a\u058b\u0001\u0000\u0000\u0000\u058b\u058c\u0006\u00ba\u000b"+ - "\u0000\u058c\u0185\u0001\u0000\u0000\u0000\u058d\u058e\u0003\u0170\u00b0"+ - "\u0000\u058e\u058f\u0001\u0000\u0000\u0000\u058f\u0590\u0006\u00bb\u0011"+ - "\u0000\u0590\u0591\u0006\u00bb\u000f\u0000\u0591\u0592\u0006\u00bb\u0007"+ - "\u0000\u0592\u0187\u0001\u0000\u0000\u0000\u0593\u0594\u0003v3\u0000\u0594"+ - "\u0595\u0001\u0000\u0000\u0000\u0595\u0596\u0006\u00bc\u0012\u0000\u0596"+ - "\u0597\u0006\u00bc\u000f\u0000\u0597\u0598\u0006\u00bc\u0007\u0000\u0598"+ - "\u0189\u0001\u0000\u0000\u0000\u0599\u059a\u0003<\u0016\u0000\u059a\u059b"+ - "\u0001\u0000\u0000\u0000\u059b\u059c\u0006\u00bd\u000b\u0000\u059c\u018b"+ - "\u0001\u0000\u0000\u0000\u059d\u059e\u0003>\u0017\u0000\u059e\u059f\u0001"+ - "\u0000\u0000\u0000\u059f\u05a0\u0006\u00be\u000b\u0000\u05a0\u018d\u0001"+ - "\u0000\u0000\u0000\u05a1\u05a2\u0003@\u0018\u0000\u05a2\u05a3\u0001\u0000"+ - "\u0000\u0000\u05a3\u05a4\u0006\u00bf\u000b\u0000\u05a4\u018f\u0001\u0000"+ - "\u0000\u0000\u05a5\u05a6\u0003\u00beW\u0000\u05a6\u05a7\u0001\u0000\u0000"+ - "\u0000\u05a7\u05a8\u0006\u00c0\u000f\u0000\u05a8\u05a9\u0006\u00c0\u0000"+ - "\u0000\u05a9\u05aa\u0006\u00c0\u001c\u0000\u05aa\u0191\u0001\u0000\u0000"+ - "\u0000\u05ab\u05ac\u0003\u00baU\u0000\u05ac\u05ad\u0001\u0000\u0000\u0000"+ - 
"\u05ad\u05ae\u0006\u00c1\u000f\u0000\u05ae\u05af\u0006\u00c1\u0000\u0000"+ - "\u05af\u05b0\u0006\u00c1\u001f\u0000\u05b0\u0193\u0001\u0000\u0000\u0000"+ - "\u05b1\u05b2\u0003l.\u0000\u05b2\u05b3\u0001\u0000\u0000\u0000\u05b3\u05b4"+ - "\u0006\u00c2\u000f\u0000\u05b4\u05b5\u0006\u00c2\u0000\u0000\u05b5\u05b6"+ - "\u0006\u00c2!\u0000\u05b6\u0195\u0001\u0000\u0000\u0000\u05b7\u05b8\u0003"+ - "P \u0000\u05b8\u05b9\u0001\u0000\u0000\u0000\u05b9\u05ba\u0006\u00c3\u000e"+ - "\u0000\u05ba\u05bb\u0006\u00c3\u000f\u0000\u05bb\u0197\u0001\u0000\u0000"+ - "\u0000A\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f"+ + "\u0000\u0388\u038b\u0003\u0094B\u0000\u0389\u038c\u0003T\"\u0000\u038a"+ + "\u038c\u0003b)\u0000\u038b\u0389\u0001\u0000\u0000\u0000\u038b\u038a\u0001"+ + "\u0000\u0000\u0000\u038c\u0390\u0001\u0000\u0000\u0000\u038d\u038f\u0003"+ + "d*\u0000\u038e\u038d\u0001\u0000\u0000\u0000\u038f\u0392\u0001\u0000\u0000"+ + "\u0000\u0390\u038e\u0001\u0000\u0000\u0000\u0390\u0391\u0001\u0000\u0000"+ + "\u0000\u0391\u039a\u0001\u0000\u0000\u0000\u0392\u0390\u0001\u0000\u0000"+ + "\u0000\u0393\u0395\u0003\u0094B\u0000\u0394\u0396\u0003R!\u0000\u0395"+ + "\u0394\u0001\u0000\u0000\u0000\u0396\u0397\u0001\u0000\u0000\u0000\u0397"+ + "\u0395\u0001\u0000\u0000\u0000\u0397\u0398\u0001\u0000\u0000\u0000\u0398"+ + "\u039a\u0001\u0000\u0000\u0000\u0399\u0388\u0001\u0000\u0000\u0000\u0399"+ + "\u0393\u0001\u0000\u0000\u0000\u039a\u00b5\u0001\u0000\u0000\u0000\u039b"+ + "\u039c\u0005[\u0000\u0000\u039c\u039d\u0001\u0000\u0000\u0000\u039d\u039e"+ + "\u0006S\u0000\u0000\u039e\u039f\u0006S\u0000\u0000\u039f\u00b7\u0001\u0000"+ + "\u0000\u0000\u03a0\u03a1\u0005]\u0000\u0000\u03a1\u03a2\u0001\u0000\u0000"+ + "\u0000\u03a2\u03a3\u0006T\u000f\u0000\u03a3\u03a4\u0006T\u000f\u0000\u03a4"+ + "\u00b9\u0001\u0000\u0000\u0000\u03a5\u03a9\u0003T\"\u0000\u03a6\u03a8"+ + "\u0003d*\u0000\u03a7\u03a6\u0001\u0000\u0000\u0000\u03a8\u03ab\u0001\u0000"+ + "\u0000\u0000\u03a9\u03a7\u0001\u0000\u0000\u0000\u03a9\u03aa\u0001\u0000"+ + "\u0000\u0000\u03aa\u03b6\u0001\u0000\u0000\u0000\u03ab\u03a9\u0001\u0000"+ + "\u0000\u0000\u03ac\u03af\u0003b)\u0000\u03ad\u03af\u0003\\&\u0000\u03ae"+ + "\u03ac\u0001\u0000\u0000\u0000\u03ae\u03ad\u0001\u0000\u0000\u0000\u03af"+ + "\u03b1\u0001\u0000\u0000\u0000\u03b0\u03b2\u0003d*\u0000\u03b1\u03b0\u0001"+ + "\u0000\u0000\u0000\u03b2\u03b3\u0001\u0000\u0000\u0000\u03b3\u03b1\u0001"+ + "\u0000\u0000\u0000\u03b3\u03b4\u0001\u0000\u0000\u0000\u03b4\u03b6\u0001"+ + "\u0000\u0000\u0000\u03b5\u03a5\u0001\u0000\u0000\u0000\u03b5\u03ae\u0001"+ + "\u0000\u0000\u0000\u03b6\u00bb\u0001\u0000\u0000\u0000\u03b7\u03b9\u0003"+ + "^\'\u0000\u03b8\u03ba\u0003`(\u0000\u03b9\u03b8\u0001\u0000\u0000\u0000"+ + "\u03ba\u03bb\u0001\u0000\u0000\u0000\u03bb\u03b9\u0001\u0000\u0000\u0000"+ + "\u03bb\u03bc\u0001\u0000\u0000\u0000\u03bc\u03bd\u0001\u0000\u0000\u0000"+ + "\u03bd\u03be\u0003^\'\u0000\u03be\u00bd\u0001\u0000\u0000\u0000\u03bf"+ + "\u03c0\u0003\u00bcV\u0000\u03c0\u00bf\u0001\u0000\u0000\u0000\u03c1\u03c2"+ + "\u0003<\u0016\u0000\u03c2\u03c3\u0001\u0000\u0000\u0000\u03c3\u03c4\u0006"+ + "X\u000b\u0000\u03c4\u00c1\u0001\u0000\u0000\u0000\u03c5\u03c6\u0003>\u0017"+ + "\u0000\u03c6\u03c7\u0001\u0000\u0000\u0000\u03c7\u03c8\u0006Y\u000b\u0000"+ + "\u03c8\u00c3\u0001\u0000\u0000\u0000\u03c9\u03ca\u0003@\u0018\u0000\u03ca"+ + "\u03cb\u0001\u0000\u0000\u0000\u03cb\u03cc\u0006Z\u000b\u0000\u03cc\u00c5"+ + "\u0001\u0000\u0000\u0000\u03cd\u03ce\u0003P \u0000\u03ce\u03cf\u0001\u0000"+ + 
"\u0000\u0000\u03cf\u03d0\u0006[\u000e\u0000\u03d0\u03d1\u0006[\u000f\u0000"+ + "\u03d1\u00c7\u0001\u0000\u0000\u0000\u03d2\u03d3\u0003\u00b6S\u0000\u03d3"+ + "\u03d4\u0001\u0000\u0000\u0000\u03d4\u03d5\u0006\\\f\u0000\u03d5\u00c9"+ + "\u0001\u0000\u0000\u0000\u03d6\u03d7\u0003\u00b8T\u0000\u03d7\u03d8\u0001"+ + "\u0000\u0000\u0000\u03d8\u03d9\u0006]\u0010\u0000\u03d9\u00cb\u0001\u0000"+ + "\u0000\u0000\u03da\u03db\u0003\u0170\u00b0\u0000\u03db\u03dc\u0001\u0000"+ + "\u0000\u0000\u03dc\u03dd\u0006^\u0011\u0000\u03dd\u00cd\u0001\u0000\u0000"+ + "\u0000\u03de\u03df\u0003v3\u0000\u03df\u03e0\u0001\u0000\u0000\u0000\u03e0"+ + "\u03e1\u0006_\u0012\u0000\u03e1\u00cf\u0001\u0000\u0000\u0000\u03e2\u03e3"+ + "\u0003r1\u0000\u03e3\u03e4\u0001\u0000\u0000\u0000\u03e4\u03e5\u0006`"+ + "\u0013\u0000\u03e5\u00d1\u0001\u0000\u0000\u0000\u03e6\u03e7\u0005m\u0000"+ + "\u0000\u03e7\u03e8\u0005e\u0000\u0000\u03e8\u03e9\u0005t\u0000\u0000\u03e9"+ + "\u03ea\u0005a\u0000\u0000\u03ea\u03eb\u0005d\u0000\u0000\u03eb\u03ec\u0005"+ + "a\u0000\u0000\u03ec\u03ed\u0005t\u0000\u0000\u03ed\u03ee\u0005a\u0000"+ + "\u0000\u03ee\u00d3\u0001\u0000\u0000\u0000\u03ef\u03f0\u0003D\u001a\u0000"+ + "\u03f0\u03f1\u0001\u0000\u0000\u0000\u03f1\u03f2\u0006b\u0014\u0000\u03f2"+ + "\u00d5\u0001\u0000\u0000\u0000\u03f3\u03f4\u0003f+\u0000\u03f4\u03f5\u0001"+ + "\u0000\u0000\u0000\u03f5\u03f6\u0006c\u0015\u0000\u03f6\u00d7\u0001\u0000"+ + "\u0000\u0000\u03f7\u03f8\u0003<\u0016\u0000\u03f8\u03f9\u0001\u0000\u0000"+ + "\u0000\u03f9\u03fa\u0006d\u000b\u0000\u03fa\u00d9\u0001\u0000\u0000\u0000"+ + "\u03fb\u03fc\u0003>\u0017\u0000\u03fc\u03fd\u0001\u0000\u0000\u0000\u03fd"+ + "\u03fe\u0006e\u000b\u0000\u03fe\u00db\u0001\u0000\u0000\u0000\u03ff\u0400"+ + "\u0003@\u0018\u0000\u0400\u0401\u0001\u0000\u0000\u0000\u0401\u0402\u0006"+ + "f\u000b\u0000\u0402\u00dd\u0001\u0000\u0000\u0000\u0403\u0404\u0003P "+ + "\u0000\u0404\u0405\u0001\u0000\u0000\u0000\u0405\u0406\u0006g\u000e\u0000"+ + "\u0406\u0407\u0006g\u000f\u0000\u0407\u00df\u0001\u0000\u0000\u0000\u0408"+ + "\u0409\u0003z5\u0000\u0409\u040a\u0001\u0000\u0000\u0000\u040a\u040b\u0006"+ + "h\u0016\u0000\u040b\u00e1\u0001\u0000\u0000\u0000\u040c\u040d\u0003v3"+ + "\u0000\u040d\u040e\u0001\u0000\u0000\u0000\u040e\u040f\u0006i\u0012\u0000"+ + "\u040f\u00e3\u0001\u0000\u0000\u0000\u0410\u0415\u0003T\"\u0000\u0411"+ + "\u0415\u0003R!\u0000\u0412\u0415\u0003b)\u0000\u0413\u0415\u0003\u00ae"+ + "O\u0000\u0414\u0410\u0001\u0000\u0000\u0000\u0414\u0411\u0001\u0000\u0000"+ + "\u0000\u0414\u0412\u0001\u0000\u0000\u0000\u0414\u0413\u0001\u0000\u0000"+ + "\u0000\u0415\u00e5\u0001\u0000\u0000\u0000\u0416\u0419\u0003T\"\u0000"+ + "\u0417\u0419\u0003\u00aeO\u0000\u0418\u0416\u0001\u0000\u0000\u0000\u0418"+ + "\u0417\u0001\u0000\u0000\u0000\u0419\u041d\u0001\u0000\u0000\u0000\u041a"+ + "\u041c\u0003\u00e4j\u0000\u041b\u041a\u0001\u0000\u0000\u0000\u041c\u041f"+ + "\u0001\u0000\u0000\u0000\u041d\u041b\u0001\u0000\u0000\u0000\u041d\u041e"+ + "\u0001\u0000\u0000\u0000\u041e\u042a\u0001\u0000\u0000\u0000\u041f\u041d"+ + "\u0001\u0000\u0000\u0000\u0420\u0423\u0003b)\u0000\u0421\u0423\u0003\\"+ + "&\u0000\u0422\u0420\u0001\u0000\u0000\u0000\u0422\u0421\u0001\u0000\u0000"+ + "\u0000\u0423\u0425\u0001\u0000\u0000\u0000\u0424\u0426\u0003\u00e4j\u0000"+ + "\u0425\u0424\u0001\u0000\u0000\u0000\u0426\u0427\u0001\u0000\u0000\u0000"+ + "\u0427\u0425\u0001\u0000\u0000\u0000\u0427\u0428\u0001\u0000\u0000\u0000"+ + "\u0428\u042a\u0001\u0000\u0000\u0000\u0429\u0418\u0001\u0000\u0000\u0000"+ + 
"\u0429\u0422\u0001\u0000\u0000\u0000\u042a\u00e7\u0001\u0000\u0000\u0000"+ + "\u042b\u042e\u0003\u00e6k\u0000\u042c\u042e\u0003\u00bcV\u0000\u042d\u042b"+ + "\u0001\u0000\u0000\u0000\u042d\u042c\u0001\u0000\u0000\u0000\u042e\u042f"+ + "\u0001\u0000\u0000\u0000\u042f\u042d\u0001\u0000\u0000\u0000\u042f\u0430"+ + "\u0001\u0000\u0000\u0000\u0430\u00e9\u0001\u0000\u0000\u0000\u0431\u0432"+ + "\u0003<\u0016\u0000\u0432\u0433\u0001\u0000\u0000\u0000\u0433\u0434\u0006"+ + "m\u000b\u0000\u0434\u00eb\u0001\u0000\u0000\u0000\u0435\u0436\u0003>\u0017"+ + "\u0000\u0436\u0437\u0001\u0000\u0000\u0000\u0437\u0438\u0006n\u000b\u0000"+ + "\u0438\u00ed\u0001\u0000\u0000\u0000\u0439\u043a\u0003@\u0018\u0000\u043a"+ + "\u043b\u0001\u0000\u0000\u0000\u043b\u043c\u0006o\u000b\u0000\u043c\u00ef"+ + "\u0001\u0000\u0000\u0000\u043d\u043e\u0003P \u0000\u043e\u043f\u0001\u0000"+ + "\u0000\u0000\u043f\u0440\u0006p\u000e\u0000\u0440\u0441\u0006p\u000f\u0000"+ + "\u0441\u00f1\u0001\u0000\u0000\u0000\u0442\u0443\u0003r1\u0000\u0443\u0444"+ + "\u0001\u0000\u0000\u0000\u0444\u0445\u0006q\u0013\u0000\u0445\u00f3\u0001"+ + "\u0000\u0000\u0000\u0446\u0447\u0003v3\u0000\u0447\u0448\u0001\u0000\u0000"+ + "\u0000\u0448\u0449\u0006r\u0012\u0000\u0449\u00f5\u0001\u0000\u0000\u0000"+ + "\u044a\u044b\u0003z5\u0000\u044b\u044c\u0001\u0000\u0000\u0000\u044c\u044d"+ + "\u0006s\u0016\u0000\u044d\u00f7\u0001\u0000\u0000\u0000\u044e\u044f\u0005"+ + "a\u0000\u0000\u044f\u0450\u0005s\u0000\u0000\u0450\u00f9\u0001\u0000\u0000"+ + "\u0000\u0451\u0452\u0003\u00e8l\u0000\u0452\u0453\u0001\u0000\u0000\u0000"+ + "\u0453\u0454\u0006u\u0017\u0000\u0454\u00fb\u0001\u0000\u0000\u0000\u0455"+ + "\u0456\u0003<\u0016\u0000\u0456\u0457\u0001\u0000\u0000\u0000\u0457\u0458"+ + "\u0006v\u000b\u0000\u0458\u00fd\u0001\u0000\u0000\u0000\u0459\u045a\u0003"+ + ">\u0017\u0000\u045a\u045b\u0001\u0000\u0000\u0000\u045b\u045c\u0006w\u000b"+ + "\u0000\u045c\u00ff\u0001\u0000\u0000\u0000\u045d\u045e\u0003@\u0018\u0000"+ + "\u045e\u045f\u0001\u0000\u0000\u0000\u045f\u0460\u0006x\u000b\u0000\u0460"+ + "\u0101\u0001\u0000\u0000\u0000\u0461\u0462\u0003P \u0000\u0462\u0463\u0001"+ + "\u0000\u0000\u0000\u0463\u0464\u0006y\u000e\u0000\u0464\u0465\u0006y\u000f"+ + "\u0000\u0465\u0103\u0001\u0000\u0000\u0000\u0466\u0467\u0003\u00b6S\u0000"+ + "\u0467\u0468\u0001\u0000\u0000\u0000\u0468\u0469\u0006z\f\u0000\u0469"+ + "\u046a\u0006z\u0018\u0000\u046a\u0105\u0001\u0000\u0000\u0000\u046b\u046c"+ + "\u0005o\u0000\u0000\u046c\u046d\u0005n\u0000\u0000\u046d\u046e\u0001\u0000"+ + "\u0000\u0000\u046e\u046f\u0006{\u0019\u0000\u046f\u0107\u0001\u0000\u0000"+ + "\u0000\u0470\u0471\u0005w\u0000\u0000\u0471\u0472\u0005i\u0000\u0000\u0472"+ + "\u0473\u0005t\u0000\u0000\u0473\u0474\u0005h\u0000\u0000\u0474\u0475\u0001"+ + "\u0000\u0000\u0000\u0475\u0476\u0006|\u0019\u0000\u0476\u0109\u0001\u0000"+ + "\u0000\u0000\u0477\u0478\b\f\u0000\u0000\u0478\u010b\u0001\u0000\u0000"+ + "\u0000\u0479\u047b\u0003\u010a}\u0000\u047a\u0479\u0001\u0000\u0000\u0000"+ + "\u047b\u047c\u0001\u0000\u0000\u0000\u047c\u047a\u0001\u0000\u0000\u0000"+ + "\u047c\u047d\u0001\u0000\u0000\u0000\u047d\u047e\u0001\u0000\u0000\u0000"+ + "\u047e\u047f\u0003\u0170\u00b0\u0000\u047f\u0481\u0001\u0000\u0000\u0000"+ + "\u0480\u047a\u0001\u0000\u0000\u0000\u0480\u0481\u0001\u0000\u0000\u0000"+ + "\u0481\u0483\u0001\u0000\u0000\u0000\u0482\u0484\u0003\u010a}\u0000\u0483"+ + "\u0482\u0001\u0000\u0000\u0000\u0484\u0485\u0001\u0000\u0000\u0000\u0485"+ + "\u0483\u0001\u0000\u0000\u0000\u0485\u0486\u0001\u0000\u0000\u0000\u0486"+ + 
"\u010d\u0001\u0000\u0000\u0000\u0487\u0488\u0003\u010c~\u0000\u0488\u0489"+ + "\u0001\u0000\u0000\u0000\u0489\u048a\u0006\u007f\u001a\u0000\u048a\u010f"+ + "\u0001\u0000\u0000\u0000\u048b\u048c\u0003<\u0016\u0000\u048c\u048d\u0001"+ + "\u0000\u0000\u0000\u048d\u048e\u0006\u0080\u000b\u0000\u048e\u0111\u0001"+ + "\u0000\u0000\u0000\u048f\u0490\u0003>\u0017\u0000\u0490\u0491\u0001\u0000"+ + "\u0000\u0000\u0491\u0492\u0006\u0081\u000b\u0000\u0492\u0113\u0001\u0000"+ + "\u0000\u0000\u0493\u0494\u0003@\u0018\u0000\u0494\u0495\u0001\u0000\u0000"+ + "\u0000\u0495\u0496\u0006\u0082\u000b\u0000\u0496\u0115\u0001\u0000\u0000"+ + "\u0000\u0497\u0498\u0003P \u0000\u0498\u0499\u0001\u0000\u0000\u0000\u0499"+ + "\u049a\u0006\u0083\u000e\u0000\u049a\u049b\u0006\u0083\u000f\u0000\u049b"+ + "\u049c\u0006\u0083\u000f\u0000\u049c\u0117\u0001\u0000\u0000\u0000\u049d"+ + "\u049e\u0003r1\u0000\u049e\u049f\u0001\u0000\u0000\u0000\u049f\u04a0\u0006"+ + "\u0084\u0013\u0000\u04a0\u0119\u0001\u0000\u0000\u0000\u04a1\u04a2\u0003"+ + "v3\u0000\u04a2\u04a3\u0001\u0000\u0000\u0000\u04a3\u04a4\u0006\u0085\u0012"+ + "\u0000\u04a4\u011b\u0001\u0000\u0000\u0000\u04a5\u04a6\u0003z5\u0000\u04a6"+ + "\u04a7\u0001\u0000\u0000\u0000\u04a7\u04a8\u0006\u0086\u0016\u0000\u04a8"+ + "\u011d\u0001\u0000\u0000\u0000\u04a9\u04aa\u0003\u0108|\u0000\u04aa\u04ab"+ + "\u0001\u0000\u0000\u0000\u04ab\u04ac\u0006\u0087\u001b\u0000\u04ac\u011f"+ + "\u0001\u0000\u0000\u0000\u04ad\u04ae\u0003\u00e8l\u0000\u04ae\u04af\u0001"+ + "\u0000\u0000\u0000\u04af\u04b0\u0006\u0088\u0017\u0000\u04b0\u0121\u0001"+ + "\u0000\u0000\u0000\u04b1\u04b2\u0003\u00beW\u0000\u04b2\u04b3\u0001\u0000"+ + "\u0000\u0000\u04b3\u04b4\u0006\u0089\u001c\u0000\u04b4\u0123\u0001\u0000"+ + "\u0000\u0000\u04b5\u04b6\u0003<\u0016\u0000\u04b6\u04b7\u0001\u0000\u0000"+ + "\u0000\u04b7\u04b8\u0006\u008a\u000b\u0000\u04b8\u0125\u0001\u0000\u0000"+ + "\u0000\u04b9\u04ba\u0003>\u0017\u0000\u04ba\u04bb\u0001\u0000\u0000\u0000"+ + "\u04bb\u04bc\u0006\u008b\u000b\u0000\u04bc\u0127\u0001\u0000\u0000\u0000"+ + "\u04bd\u04be\u0003@\u0018\u0000\u04be\u04bf\u0001\u0000\u0000\u0000\u04bf"+ + "\u04c0\u0006\u008c\u000b\u0000\u04c0\u0129\u0001\u0000\u0000\u0000\u04c1"+ + "\u04c2\u0003P \u0000\u04c2\u04c3\u0001\u0000\u0000\u0000\u04c3\u04c4\u0006"+ + "\u008d\u000e\u0000\u04c4\u04c5\u0006\u008d\u000f\u0000\u04c5\u012b\u0001"+ + "\u0000\u0000\u0000\u04c6\u04c7\u0003\u0170\u00b0\u0000\u04c7\u04c8\u0001"+ + "\u0000\u0000\u0000\u04c8\u04c9\u0006\u008e\u0011\u0000\u04c9\u012d\u0001"+ + "\u0000\u0000\u0000\u04ca\u04cb\u0003v3\u0000\u04cb\u04cc\u0001\u0000\u0000"+ + "\u0000\u04cc\u04cd\u0006\u008f\u0012\u0000\u04cd\u012f\u0001\u0000\u0000"+ + "\u0000\u04ce\u04cf\u0003z5\u0000\u04cf\u04d0\u0001\u0000\u0000\u0000\u04d0"+ + "\u04d1\u0006\u0090\u0016\u0000\u04d1\u0131\u0001\u0000\u0000\u0000\u04d2"+ + "\u04d3\u0003\u0106{\u0000\u04d3\u04d4\u0001\u0000\u0000\u0000\u04d4\u04d5"+ + "\u0006\u0091\u001d\u0000\u04d5\u04d6\u0006\u0091\u001e\u0000\u04d6\u0133"+ + "\u0001\u0000\u0000\u0000\u04d7\u04d8\u0003D\u001a\u0000\u04d8\u04d9\u0001"+ + "\u0000\u0000\u0000\u04d9\u04da\u0006\u0092\u0014\u0000\u04da\u0135\u0001"+ + "\u0000\u0000\u0000\u04db\u04dc\u0003f+\u0000\u04dc\u04dd\u0001\u0000\u0000"+ + "\u0000\u04dd\u04de\u0006\u0093\u0015\u0000\u04de\u0137\u0001\u0000\u0000"+ + "\u0000\u04df\u04e0\u0003<\u0016\u0000\u04e0\u04e1\u0001\u0000\u0000\u0000"+ + "\u04e1\u04e2\u0006\u0094\u000b\u0000\u04e2\u0139\u0001\u0000\u0000\u0000"+ + "\u04e3\u04e4\u0003>\u0017\u0000\u04e4\u04e5\u0001\u0000\u0000\u0000\u04e5"+ + 
"\u04e6\u0006\u0095\u000b\u0000\u04e6\u013b\u0001\u0000\u0000\u0000\u04e7"+ + "\u04e8\u0003@\u0018\u0000\u04e8\u04e9\u0001\u0000\u0000\u0000\u04e9\u04ea"+ + "\u0006\u0096\u000b\u0000\u04ea\u013d\u0001\u0000\u0000\u0000\u04eb\u04ec"+ + "\u0003P \u0000\u04ec\u04ed\u0001\u0000\u0000\u0000\u04ed\u04ee\u0006\u0097"+ + "\u000e\u0000\u04ee\u04ef\u0006\u0097\u000f\u0000\u04ef\u04f0\u0006\u0097"+ + "\u000f\u0000\u04f0\u013f\u0001\u0000\u0000\u0000\u04f1\u04f2\u0003v3\u0000"+ + "\u04f2\u04f3\u0001\u0000\u0000\u0000\u04f3\u04f4\u0006\u0098\u0012\u0000"+ + "\u04f4\u0141\u0001\u0000\u0000\u0000\u04f5\u04f6\u0003z5\u0000\u04f6\u04f7"+ + "\u0001\u0000\u0000\u0000\u04f7\u04f8\u0006\u0099\u0016\u0000\u04f8\u0143"+ + "\u0001\u0000\u0000\u0000\u04f9\u04fa\u0003\u00e8l\u0000\u04fa\u04fb\u0001"+ + "\u0000\u0000\u0000\u04fb\u04fc\u0006\u009a\u0017\u0000\u04fc\u0145\u0001"+ + "\u0000\u0000\u0000\u04fd\u04fe\u0003<\u0016\u0000\u04fe\u04ff\u0001\u0000"+ + "\u0000\u0000\u04ff\u0500\u0006\u009b\u000b\u0000\u0500\u0147\u0001\u0000"+ + "\u0000\u0000\u0501\u0502\u0003>\u0017\u0000\u0502\u0503\u0001\u0000\u0000"+ + "\u0000\u0503\u0504\u0006\u009c\u000b\u0000\u0504\u0149\u0001\u0000\u0000"+ + "\u0000\u0505\u0506\u0003@\u0018\u0000\u0506\u0507\u0001\u0000\u0000\u0000"+ + "\u0507\u0508\u0006\u009d\u000b\u0000\u0508\u014b\u0001\u0000\u0000\u0000"+ + "\u0509\u050a\u0003P \u0000\u050a\u050b\u0001\u0000\u0000\u0000\u050b\u050c"+ + "\u0006\u009e\u000e\u0000\u050c\u050d\u0006\u009e\u000f\u0000\u050d\u014d"+ + "\u0001\u0000\u0000\u0000\u050e\u050f\u0003z5\u0000\u050f\u0510\u0001\u0000"+ + "\u0000\u0000\u0510\u0511\u0006\u009f\u0016\u0000\u0511\u014f\u0001\u0000"+ + "\u0000\u0000\u0512\u0513\u0003\u00beW\u0000\u0513\u0514\u0001\u0000\u0000"+ + "\u0000\u0514\u0515\u0006\u00a0\u001c\u0000\u0515\u0151\u0001\u0000\u0000"+ + "\u0000\u0516\u0517\u0003\u00baU\u0000\u0517\u0518\u0001\u0000\u0000\u0000"+ + "\u0518\u0519\u0006\u00a1\u001f\u0000\u0519\u0153\u0001\u0000\u0000\u0000"+ + "\u051a\u051b\u0003<\u0016\u0000\u051b\u051c\u0001\u0000\u0000\u0000\u051c"+ + "\u051d\u0006\u00a2\u000b\u0000\u051d\u0155\u0001\u0000\u0000\u0000\u051e"+ + "\u051f\u0003>\u0017\u0000\u051f\u0520\u0001\u0000\u0000\u0000\u0520\u0521"+ + "\u0006\u00a3\u000b\u0000\u0521\u0157\u0001\u0000\u0000\u0000\u0522\u0523"+ + "\u0003@\u0018\u0000\u0523\u0524\u0001\u0000\u0000\u0000\u0524\u0525\u0006"+ + "\u00a4\u000b\u0000\u0525\u0159\u0001\u0000\u0000\u0000\u0526\u0527\u0003"+ + "P \u0000\u0527\u0528\u0001\u0000\u0000\u0000\u0528\u0529\u0006\u00a5\u000e"+ + "\u0000\u0529\u052a\u0006\u00a5\u000f\u0000\u052a\u015b\u0001\u0000\u0000"+ + "\u0000\u052b\u052c\u0005i\u0000\u0000\u052c\u052d\u0005n\u0000\u0000\u052d"+ + "\u052e\u0005f\u0000\u0000\u052e\u052f\u0005o\u0000\u0000\u052f\u015d\u0001"+ + "\u0000\u0000\u0000\u0530\u0531\u0003<\u0016\u0000\u0531\u0532\u0001\u0000"+ + "\u0000\u0000\u0532\u0533\u0006\u00a7\u000b\u0000\u0533\u015f\u0001\u0000"+ + "\u0000\u0000\u0534\u0535\u0003>\u0017\u0000\u0535\u0536\u0001\u0000\u0000"+ + "\u0000\u0536\u0537\u0006\u00a8\u000b\u0000\u0537\u0161\u0001\u0000\u0000"+ + "\u0000\u0538\u0539\u0003@\u0018\u0000\u0539\u053a\u0001\u0000\u0000\u0000"+ + "\u053a\u053b\u0006\u00a9\u000b\u0000\u053b\u0163\u0001\u0000\u0000\u0000"+ + "\u053c\u053d\u0003P \u0000\u053d\u053e\u0001\u0000\u0000\u0000\u053e\u053f"+ + "\u0006\u00aa\u000e\u0000\u053f\u0540\u0006\u00aa\u000f\u0000\u0540\u0165"+ + "\u0001\u0000\u0000\u0000\u0541\u0542\u0005f\u0000\u0000\u0542\u0543\u0005"+ + "u\u0000\u0000\u0543\u0544\u0005n\u0000\u0000\u0544\u0545\u0005c\u0000"+ + 
"\u0000\u0545\u0546\u0005t\u0000\u0000\u0546\u0547\u0005i\u0000\u0000\u0547"+ + "\u0548\u0005o\u0000\u0000\u0548\u0549\u0005n\u0000\u0000\u0549\u054a\u0005"+ + "s\u0000\u0000\u054a\u0167\u0001\u0000\u0000\u0000\u054b\u054c\u0003<\u0016"+ + "\u0000\u054c\u054d\u0001\u0000\u0000\u0000\u054d\u054e\u0006\u00ac\u000b"+ + "\u0000\u054e\u0169\u0001\u0000\u0000\u0000\u054f\u0550\u0003>\u0017\u0000"+ + "\u0550\u0551\u0001\u0000\u0000\u0000\u0551\u0552\u0006\u00ad\u000b\u0000"+ + "\u0552\u016b\u0001\u0000\u0000\u0000\u0553\u0554\u0003@\u0018\u0000\u0554"+ + "\u0555\u0001\u0000\u0000\u0000\u0555\u0556\u0006\u00ae\u000b\u0000\u0556"+ + "\u016d\u0001\u0000\u0000\u0000\u0557\u0558\u0003\u00b8T\u0000\u0558\u0559"+ + "\u0001\u0000\u0000\u0000\u0559\u055a\u0006\u00af\u0010\u0000\u055a\u055b"+ + "\u0006\u00af\u000f\u0000\u055b\u016f\u0001\u0000\u0000\u0000\u055c\u055d"+ + "\u0005:\u0000\u0000\u055d\u0171\u0001\u0000\u0000\u0000\u055e\u0564\u0003"+ + "\\&\u0000\u055f\u0564\u0003R!\u0000\u0560\u0564\u0003z5\u0000\u0561\u0564"+ + "\u0003T\"\u0000\u0562\u0564\u0003b)\u0000\u0563\u055e\u0001\u0000\u0000"+ + "\u0000\u0563\u055f\u0001\u0000\u0000\u0000\u0563\u0560\u0001\u0000\u0000"+ + "\u0000\u0563\u0561\u0001\u0000\u0000\u0000\u0563\u0562\u0001\u0000\u0000"+ + "\u0000\u0564\u0565\u0001\u0000\u0000\u0000\u0565\u0563\u0001\u0000\u0000"+ + "\u0000\u0565\u0566\u0001\u0000\u0000\u0000\u0566\u0173\u0001\u0000\u0000"+ + "\u0000\u0567\u0568\u0003<\u0016\u0000\u0568\u0569\u0001\u0000\u0000\u0000"+ + "\u0569\u056a\u0006\u00b2\u000b\u0000\u056a\u0175\u0001\u0000\u0000\u0000"+ + "\u056b\u056c\u0003>\u0017\u0000\u056c\u056d\u0001\u0000\u0000\u0000\u056d"+ + "\u056e\u0006\u00b3\u000b\u0000\u056e\u0177\u0001\u0000\u0000\u0000\u056f"+ + "\u0570\u0003@\u0018\u0000\u0570\u0571\u0001\u0000\u0000\u0000\u0571\u0572"+ + "\u0006\u00b4\u000b\u0000\u0572\u0179\u0001\u0000\u0000\u0000\u0573\u0574"+ + "\u0003P \u0000\u0574\u0575\u0001\u0000\u0000\u0000\u0575\u0576\u0006\u00b5"+ + "\u000e\u0000\u0576\u0577\u0006\u00b5\u000f\u0000\u0577\u017b\u0001\u0000"+ + "\u0000\u0000\u0578\u0579\u0003D\u001a\u0000\u0579\u057a\u0001\u0000\u0000"+ + "\u0000\u057a\u057b\u0006\u00b6\u0014\u0000\u057b\u057c\u0006\u00b6\u000f"+ + "\u0000\u057c\u057d\u0006\u00b6 \u0000\u057d\u017d\u0001\u0000\u0000\u0000"+ + "\u057e\u057f\u0003f+\u0000\u057f\u0580\u0001\u0000\u0000\u0000\u0580\u0581"+ + "\u0006\u00b7\u0015\u0000\u0581\u0582\u0006\u00b7\u000f\u0000\u0582\u0583"+ + "\u0006\u00b7 \u0000\u0583\u017f\u0001\u0000\u0000\u0000\u0584\u0585\u0003"+ + "<\u0016\u0000\u0585\u0586\u0001\u0000\u0000\u0000\u0586\u0587\u0006\u00b8"+ + "\u000b\u0000\u0587\u0181\u0001\u0000\u0000\u0000\u0588\u0589\u0003>\u0017"+ + "\u0000\u0589\u058a\u0001\u0000\u0000\u0000\u058a\u058b\u0006\u00b9\u000b"+ + "\u0000\u058b\u0183\u0001\u0000\u0000\u0000\u058c\u058d\u0003@\u0018\u0000"+ + "\u058d\u058e\u0001\u0000\u0000\u0000\u058e\u058f\u0006\u00ba\u000b\u0000"+ + "\u058f\u0185\u0001\u0000\u0000\u0000\u0590\u0591\u0003\u0170\u00b0\u0000"+ + "\u0591\u0592\u0001\u0000\u0000\u0000\u0592\u0593\u0006\u00bb\u0011\u0000"+ + "\u0593\u0594\u0006\u00bb\u000f\u0000\u0594\u0595\u0006\u00bb\u0007\u0000"+ + "\u0595\u0187\u0001\u0000\u0000\u0000\u0596\u0597\u0003v3\u0000\u0597\u0598"+ + "\u0001\u0000\u0000\u0000\u0598\u0599\u0006\u00bc\u0012\u0000\u0599\u059a"+ + "\u0006\u00bc\u000f\u0000\u059a\u059b\u0006\u00bc\u0007\u0000\u059b\u0189"+ + "\u0001\u0000\u0000\u0000\u059c\u059d\u0003<\u0016\u0000\u059d\u059e\u0001"+ + "\u0000\u0000\u0000\u059e\u059f\u0006\u00bd\u000b\u0000\u059f\u018b\u0001"+ + 
"\u0000\u0000\u0000\u05a0\u05a1\u0003>\u0017\u0000\u05a1\u05a2\u0001\u0000"+ + "\u0000\u0000\u05a2\u05a3\u0006\u00be\u000b\u0000\u05a3\u018d\u0001\u0000"+ + "\u0000\u0000\u05a4\u05a5\u0003@\u0018\u0000\u05a5\u05a6\u0001\u0000\u0000"+ + "\u0000\u05a6\u05a7\u0006\u00bf\u000b\u0000\u05a7\u018f\u0001\u0000\u0000"+ + "\u0000\u05a8\u05a9\u0003\u00beW\u0000\u05a9\u05aa\u0001\u0000\u0000\u0000"+ + "\u05aa\u05ab\u0006\u00c0\u000f\u0000\u05ab\u05ac\u0006\u00c0\u0000\u0000"+ + "\u05ac\u05ad\u0006\u00c0\u001c\u0000\u05ad\u0191\u0001\u0000\u0000\u0000"+ + "\u05ae\u05af\u0003\u00baU\u0000\u05af\u05b0\u0001\u0000\u0000\u0000\u05b0"+ + "\u05b1\u0006\u00c1\u000f\u0000\u05b1\u05b2\u0006\u00c1\u0000\u0000\u05b2"+ + "\u05b3\u0006\u00c1\u001f\u0000\u05b3\u0193\u0001\u0000\u0000\u0000\u05b4"+ + "\u05b5\u0003l.\u0000\u05b5\u05b6\u0001\u0000\u0000\u0000\u05b6\u05b7\u0006"+ + "\u00c2\u000f\u0000\u05b7\u05b8\u0006\u00c2\u0000\u0000\u05b8\u05b9\u0006"+ + "\u00c2!\u0000\u05b9\u0195\u0001\u0000\u0000\u0000\u05ba\u05bb\u0003P "+ + "\u0000\u05bb\u05bc\u0001\u0000\u0000\u0000\u05bc\u05bd\u0006\u00c3\u000e"+ + "\u0000\u05bd\u05be\u0006\u00c3\u000f\u0000\u05be\u0197\u0001\u0000\u0000"+ + "\u0000B\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f"+ "\r\u000e\u000f\u024c\u0256\u025a\u025d\u0266\u0268\u0273\u027a\u027f\u02a6"+ "\u02ab\u02b4\u02bb\u02c0\u02c2\u02cd\u02d5\u02d8\u02da\u02df\u02e4\u02ea"+ - "\u02f1\u02f6\u02fc\u02ff\u0307\u030b\u038d\u0394\u0396\u03a6\u03ab\u03b0"+ - "\u03b2\u03b8\u0411\u0415\u041a\u041f\u0424\u0426\u042a\u042c\u0479\u047d"+ - "\u0482\u0560\u0562\"\u0005\u0002\u0000\u0005\u0004\u0000\u0005\u0006\u0000"+ - "\u0005\u0001\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\f\u0000\u0005"+ - "\u000e\u0000\u0005\n\u0000\u0005\u0005\u0000\u0005\u000b\u0000\u0000\u0001"+ - "\u0000\u0007G\u0000\u0005\u0000\u0000\u0007\u001e\u0000\u0004\u0000\u0000"+ - "\u0007H\u0000\u0007t\u0000\u0007\'\u0000\u0007%\u0000\u0007\u001a\u0000"+ - "\u0007\u001f\u0000\u0007)\u0000\u0007R\u0000\u0005\r\u0000\u0005\u0007"+ - "\u0000\u0007\\\u0000\u0007[\u0000\u0007J\u0000\u0007Z\u0000\u0005\t\u0000"+ - "\u0007I\u0000\u0005\u000f\u0000\u0007\"\u0000"; + "\u02f1\u02f6\u02fc\u02ff\u0307\u030b\u038b\u0390\u0397\u0399\u03a9\u03ae"+ + "\u03b3\u03b5\u03bb\u0414\u0418\u041d\u0422\u0427\u0429\u042d\u042f\u047c"+ + "\u0480\u0485\u0563\u0565\"\u0005\u0002\u0000\u0005\u0004\u0000\u0005\u0006"+ + "\u0000\u0005\u0001\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\f\u0000"+ + "\u0005\u000e\u0000\u0005\n\u0000\u0005\u0005\u0000\u0005\u000b\u0000\u0000"+ + "\u0001\u0000\u0007G\u0000\u0005\u0000\u0000\u0007\u001e\u0000\u0004\u0000"+ + "\u0000\u0007H\u0000\u0007t\u0000\u0007\'\u0000\u0007%\u0000\u0007\u001a"+ + "\u0000\u0007\u001f\u0000\u0007)\u0000\u0007R\u0000\u0005\r\u0000\u0005"+ + "\u0007\u0000\u0007\\\u0000\u0007[\u0000\u0007J\u0000\u0007Z\u0000\u0005"+ + "\t\u0000\u0007I\u0000\u0005\u000f\u0000\u0007\"\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index b1dff5ce8c342..eef2dbbb53362 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -93,7 +93,9 @@ public void testNamedParams() throws IOException { String 
paramsString = """ ,"params":[ {"n1" : "8.15.0" }, { "n2" : 0.05 }, {"n3" : -799810013 }, - {"n4" : "127.0.0.1"}, {"n5" : "esql"}, {"n_6" : null}, {"n7_" : false}] }"""; + {"n4" : "127.0.0.1"}, {"n5" : "esql"}, {"n_6" : null}, {"n7_" : false}, + {"_n1" : "8.15.0" }, { "__n2" : 0.05 }, {"__3" : -799810013 }, + {"__4n" : "127.0.0.1"}, {"_n5" : "esql"}, {"_n6" : null}, {"_n7" : false}] }"""; List params = new ArrayList<>(4); params.add(new QueryParam("n1", "8.15.0", DataType.KEYWORD)); params.add(new QueryParam("n2", 0.05, DataType.DOUBLE)); @@ -102,6 +104,13 @@ public void testNamedParams() throws IOException { params.add(new QueryParam("n5", "esql", DataType.KEYWORD)); params.add(new QueryParam("n_6", null, DataType.NULL)); params.add(new QueryParam("n7_", false, DataType.BOOLEAN)); + params.add(new QueryParam("_n1", "8.15.0", DataType.KEYWORD)); + params.add(new QueryParam("__n2", 0.05, DataType.DOUBLE)); + params.add(new QueryParam("__3", -799810013, DataType.INTEGER)); + params.add(new QueryParam("__4n", "127.0.0.1", DataType.KEYWORD)); + params.add(new QueryParam("_n5", "esql", DataType.KEYWORD)); + params.add(new QueryParam("_n6", null, DataType.NULL)); + params.add(new QueryParam("_n7", false, DataType.BOOLEAN)); String json = String.format(Locale.ROOT, """ { "query": "%s", @@ -131,7 +140,7 @@ public void testInvalidParams() throws IOException { QueryBuilder filter = randomQueryBuilder(); String paramsString1 = """ - "params":[ {"1" : "v1" }, {"1x" : "v1" }, {"_a" : "v1" }, {"@-#" : "v1" }, 1, 2]"""; + "params":[ {"1" : "v1" }, {"1x" : "v1" }, {"@a" : "v1" }, {"@-#" : "v1" }, 1, 2, {"_1" : "v1" }, {"Å" : 0}, {"x " : 0}]"""; String json1 = String.format(Locale.ROOT, """ { %s @@ -146,16 +155,20 @@ public void testInvalidParams() throws IOException { e1.getCause().getMessage(), containsString( "Failed to parse params: [2:16] [1] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, and contains letters, digits and underscores only" ) ); assertThat(e1.getCause().getMessage(), containsString("[2:31] [1x] is not a valid parameter name")); - assertThat(e1.getCause().getMessage(), containsString("[2:47] [_a] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:47] [@a] is not a valid parameter name")); assertThat(e1.getCause().getMessage(), containsString("[2:63] [@-#] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:102] [Å] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:113] [x ] is not a valid parameter name")); + assertThat( e1.getCause().getMessage(), containsString( - "Params cannot contain both named and unnamed parameters; got [{1:v1}, {1x:v1}, {_a:v1}, {@-#:v1}] and [{1}, {2}]" + "Params cannot contain both named and unnamed parameters; " + + "got [{1:v1}, {1x:v1}, {@a:v1}, {@-#:v1}, {_1:v1}, {Å:0}, {x :0}] and [{1}, {2}]" ) ); @@ -175,7 +188,7 @@ public void testInvalidParams() throws IOException { e2.getCause().getMessage(), containsString( "Failed to parse params: [2:22] [1] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, and contains letters, digits and underscores only" ) ); assertThat(e2.getCause().getMessage(), containsString("[2:37] [1x] is not a valid 
parameter name")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index a5ef7900a1a78..6980171a7bcd7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1019,7 +1019,7 @@ public void testInvalidNamedParams() { "Unknown query parameter [n2], did you mean any of [n3, n1]?" ); - expectError("from test | where x < ?_1", List.of(new QueryParam("_1", 5, INTEGER)), "extraneous input '_1' expecting "); + expectError("from test | where x < ?@1", List.of(new QueryParam("@1", 5, INTEGER)), "extraneous input '@1' expecting "); expectError("from test | where x < ?#1", List.of(new QueryParam("#1", 5, INTEGER)), "token recognition error at: '#'"); @@ -1028,6 +1028,10 @@ public void testInvalidNamedParams() { List.of(new QueryParam("n_1", 5, INTEGER), new QueryParam("n_2", 5, INTEGER)), "extraneous input '?' expecting " ); + + expectError("from test | where x < ?Å", List.of(new QueryParam("Å", 5, INTEGER)), "line 1:24: token recognition error at: 'Å'"); + + expectError("from test | eval x = ?Å", List.of(new QueryParam("Å", 5, INTEGER)), "line 1:23: token recognition error at: 'Å'"); } public void testPositionalParams() { @@ -1069,12 +1073,6 @@ public void testInvalidPositionalParams() { + "line 1:35: No parameter is defined for position 2, did you mean position 1?" ); - expectError( - "from test | where x < ?0 and y < ?2", - List.of(new QueryParam(null, 5, INTEGER)), - "No parameter is defined for position 2, did you mean position 1" - ); - expectError( "from test | where x < ?0", List.of(new QueryParam(null, 5, INTEGER), new QueryParam(null, 10, INTEGER)), @@ -1107,6 +1105,18 @@ public void testParamInWhere() { assertThat(limit.children().get(0).children().size(), equalTo(1)); assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement("from test | where x < ?_n1 | limit 10", new QueryParams(List.of(new QueryParam("_n1", 5, INTEGER)))); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Filter.class)); + w = (Filter) limit.children().get(0); + assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); + assertThat(limit.children().get(0).children().size(), equalTo(1)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement("from test | where x < ?1 | limit 10", new QueryParams(List.of(new QueryParam(null, 5, INTEGER)))); assertThat(plan, instanceOf(Limit.class)); limit = (Limit) plan; @@ -1118,6 +1128,18 @@ public void testParamInWhere() { assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); assertThat(limit.children().get(0).children().size(), equalTo(1)); assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement("from test | where x < ?__1 | limit 10", new QueryParams(List.of(new QueryParam("__1", 5, INTEGER)))); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + 
assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Filter.class)); + w = (Filter) limit.children().get(0); + assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); + assertThat(limit.children().get(0).children().size(), equalTo(1)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInEval() { @@ -1161,6 +1183,26 @@ public void testParamInEval() { assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( + "from test | where x < ?_n1 | eval y = ?_n2 + ?_n3 | limit 10", + new QueryParams( + List.of(new QueryParam("_n1", 5, INTEGER), new QueryParam("_n2", -1, INTEGER), new QueryParam("_n3", 100, INTEGER)) + ) + ); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Eval.class)); + eval = (Eval) limit.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(100)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | limit 10", new QueryParams(List.of(new QueryParam(null, 5, INTEGER), new QueryParam(null, -1, INTEGER))) @@ -1178,6 +1220,24 @@ public void testParamInEval() { assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement( + "from test | where x < ?_1 | eval y = ?_2 + ?_1 | limit 10", + new QueryParams(List.of(new QueryParam("_1", 5, INTEGER), new QueryParam("_2", -1, INTEGER))) + ); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Eval.class)); + eval = (Eval) limit.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(5)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInAggFunction() { @@ -1231,6 +1291,31 @@ public void testParamInAggFunction() { assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( + "from test | where x < ?_n1 | eval y = ?_n2 + ?_n3 | stats count(?_n4) by z", + new QueryParams( + List.of( + new QueryParam("_n1", 5, INTEGER), + new QueryParam("_n2", -1, INTEGER), + new QueryParam("_n3", 100, INTEGER), + new QueryParam("_n4", "*", KEYWORD) + ) + ) + ); + 
assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; + assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); + assertThat(agg.child(), instanceOf(Eval.class)); + assertThat(agg.children().size(), equalTo(1)); + assertThat(agg.children().get(0), instanceOf(Eval.class)); + eval = (Eval) agg.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(100)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | stats count(?3) by z", new QueryParams( @@ -1250,6 +1335,26 @@ public void testParamInAggFunction() { assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement( + "from test | where x < ?_1 | eval y = ?_2 + ?_1 | stats count(?_3) by z", + new QueryParams( + List.of(new QueryParam("_1", 5, INTEGER), new QueryParam("_2", -1, INTEGER), new QueryParam("_3", "*", KEYWORD)) + ) + ); + assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; + assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); + assertThat(agg.child(), instanceOf(Eval.class)); + assertThat(agg.children().size(), equalTo(1)); + assertThat(agg.children().get(0), instanceOf(Eval.class)); + eval = (Eval) agg.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(5)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamMixed() { @@ -1266,24 +1371,36 @@ public void testParamMixed() { ); expectError( - "from test | where x < ?1 | eval y = ?n2 + ?n3 | limit ?n4", + "from test | where x < ? | eval y = ?_n2 + ?n3 | limit ?_4", List.of( new QueryParam("n1", 5, INTEGER), - new QueryParam("n2", -1, INTEGER), + new QueryParam("_n2", -1, INTEGER), new QueryParam("n3", 100, INTEGER), new QueryParam("n4", 10, INTEGER) ), + "Inconsistent parameter declaration, " + + "use one of positional, named or anonymous params but not a combination of named and anonymous" + ); + + expectError( + "from test | where x < ?1 | eval y = ?n2 + ?_n3 | limit ?n4", + List.of( + new QueryParam("n1", 5, INTEGER), + new QueryParam("n2", -1, INTEGER), + new QueryParam("_n3", 100, INTEGER), + new QueryParam("n4", 10, INTEGER) + ), "Inconsistent parameter declaration, " + "use one of positional, named or anonymous params but not a combination of named and positional" ); expectError( - "from test | where x < ? | eval y = ?2 + ?n3 | limit ?n4", + "from test | where x < ? 
| eval y = ?2 + ?n3 | limit ?_n4", List.of( new QueryParam("n1", 5, INTEGER), new QueryParam("n2", -1, INTEGER), new QueryParam("n3", 100, INTEGER), - new QueryParam("n4", 10, INTEGER) + new QueryParam("_n4", 10, INTEGER) ), "Inconsistent parameter declaration, " + "use one of positional, named or anonymous params but not a combination of positional and anonymous" @@ -1536,6 +1653,22 @@ public void testSimpleMetricsWithStats() { ); } + public void testInvalidAlias() { + expectError("row Å = 1", "line 1:5: token recognition error at: 'Å'"); + expectError("from test | eval Å = 1", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | where Å == 1", "line 1:19: token recognition error at: 'Å'"); + expectError("from test | keep Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | drop Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | sort Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | rename Å as A", "line 1:20: token recognition error at: 'Å'"); + expectError("from test | rename A as Å", "line 1:25: token recognition error at: 'Å'"); + expectError("from test | rename Å as Å", "line 1:20: token recognition error at: 'Å'"); + expectError("from test | stats Å = count(*)", "line 1:19: token recognition error at: 'Å'"); + expectError("from test | stats count(Å)", "line 1:25: token recognition error at: 'Å'"); + expectError("from test | eval A = coalesce(Å, null)", "line 1:31: token recognition error at: 'Å'"); + expectError("from test | eval A = coalesce(\"Å\", Å)", "line 1:36: token recognition error at: 'Å'"); + } + private LogicalPlan unresolvedRelation(String index) { return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, List.of(), IndexMode.STANDARD, null); } From a02dc7165c75f12701f8d47a2bdefe5283735267 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 26 Aug 2024 13:39:19 -0500 Subject: [PATCH 210/389] Improve performance of grok pattern cycle detection (#111947) --- docs/changelog/111947.yaml | 5 + .../org/elasticsearch/grok/PatternBank.java | 144 +++++++++----- .../elasticsearch/grok/PatternBankTests.java | 179 ++++++++++++++++-- 3 files changed, 267 insertions(+), 61 deletions(-) create mode 100644 docs/changelog/111947.yaml diff --git a/docs/changelog/111947.yaml b/docs/changelog/111947.yaml new file mode 100644 index 0000000000000..0aff0b9c7b8be --- /dev/null +++ b/docs/changelog/111947.yaml @@ -0,0 +1,5 @@ +pr: 111947 +summary: Improve performance of grok pattern cycle detection +area: Ingest Node +type: bug +issues: [] diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java index bcf9253866931..3b10d58815169 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java @@ -8,12 +8,17 @@ package org.elasticsearch.grok; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; +import java.util.Deque; +import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; public class PatternBank { @@ -57,52 +62,102 @@ public PatternBank extendWith(Map extraPatterns) { } /** - * Checks whether patterns reference each other in a circular manner and if so fail with an exception. 
+     * Checks whether patterns reference each other in a circular manner and if so fail with an IllegalArgumentException. It will also
+     * fail if any pattern value contains a pattern name that does not exist in the bank.
      * <p>
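      * For example (an illustrative bank, with names mirroring the unit tests rather than taken from this javadoc): a bank of
      * {"NAME1" -> "!!!%{NAME2}!!!", "NAME2" -> "!!!%{NAME1}!!!"} fails, because the walk rooted at NAME1 visits NAME2 and
      * then reaches NAME1 again with more than one level on the stack, throwing
      * "circular reference detected: NAME1->NAME2->NAME1".
      * <p>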
      * In a pattern, anything between %{ and } or : is considered
      * a reference to another named pattern. This method will navigate to all these named patterns and
      * check for a circular reference.
      */
     static void forbidCircularReferences(Map<String, String> bank) {
-        // first ensure that the pattern bank contains no simple circular references (i.e., any pattern
-        // containing an immediate reference to itself) as those can cause the remainder of this algorithm
-        // to recurse infinitely
-        for (Map.Entry<String, String> entry : bank.entrySet()) {
-            if (patternReferencesItself(entry.getValue(), entry.getKey())) {
-                throw new IllegalArgumentException("circular reference in pattern [" + entry.getKey() + "][" + entry.getValue() + "]");
+        Set<String> allVisitedNodes = new HashSet<>();
+        Set<String> nodesVisitedMoreThanOnceInAPath = new HashSet<>();
+        // Walk the full path starting at each node in the graph:
+        for (String traversalStartNode : bank.keySet()) {
+            if (nodesVisitedMoreThanOnceInAPath.contains(traversalStartNode) == false && allVisitedNodes.contains(traversalStartNode)) {
+                // If we have seen this node before in a path, and it only appeared once in that path, there is no need to check it again
+                continue;
             }
-        }
-
-        // next, recursively check any other pattern names referenced in each pattern
-        for (Map.Entry<String, String> entry : bank.entrySet()) {
-            String name = entry.getKey();
-            String pattern = entry.getValue();
-            innerForbidCircularReferences(bank, name, new ArrayList<>(), pattern);
+            Set<String> visitedFromThisStartNode = new LinkedHashSet<>();
+            /*
+             * This stack records where we are in the graph. Each String[] in the stack represents a collection of neighbors to the first
+             * non-null node in the layer below it. Null means that the path from that location has been fully traversed. Once all nodes
+             * at a layer have been set to null, the layer is popped. So for example say we have the graph
+             * ( 1 -> (2 -> (4, 5, 8), 3 -> (6, 7))) then when we are at 6 via 1 -> 3 -> 6, the stack looks like this:
+             * [6, 7]
+             * [null, 3]
+             * [1]
+             */
+            Deque<String[]> stack = new ArrayDeque<>();
+            stack.push(new String[] { traversalStartNode });
+            // This is used so that we know that we're unwinding the stack and know not to get the current node's neighbors again.
+            boolean unwinding = false;
+            while (stack.isEmpty() == false) {
+                String[] currentLevel = stack.peek();
+                int firstNonNullIndex = findFirstNonNull(currentLevel);
+                String node = currentLevel[firstNonNullIndex];
+                boolean endOfThisPath = false;
+                if (unwinding) {
+                    // We have completed all of this node's neighbors and have popped back to the node
+                    endOfThisPath = true;
+                } else if (traversalStartNode.equals(node) && stack.size() > 1) {
+                    Deque<String> reversedPath = new ArrayDeque<>();
+                    for (String[] level : stack) {
+                        reversedPath.push(level[findFirstNonNull(level)]);
+                    }
+                    throw new IllegalArgumentException("circular reference detected: " + String.join("->", reversedPath));
+                } else if (visitedFromThisStartNode.contains(node)) {
+                    /*
+                     * We are only looking for a cycle starting and ending at traversalStartNode right now. But this node has been
+                     * visited more than once in the path rooted at traversalStartNode. This could be because it is a cycle, or could be
+                     * because two nodes in the path both point to it. We add it to nodesVisitedMoreThanOnceInAPath so that we make sure
+                     * to check the path rooted at this node later.
+ */ + nodesVisitedMoreThanOnceInAPath.add(node); + endOfThisPath = true; + } else { + visitedFromThisStartNode.add(node); + String[] neighbors = getPatternNamesForPattern(bank, node); + if (neighbors.length == 0) { + endOfThisPath = true; + } else { + stack.push(neighbors); + } + } + if (endOfThisPath) { + if (firstNonNullIndex == currentLevel.length - 1) { + // We have handled all the neighbors at this level -- there are no more non-null ones + stack.pop(); + unwinding = true; + } else { + currentLevel[firstNonNullIndex] = null; + unwinding = false; + } + } else { + unwinding = false; + } + } + allVisitedNodes.addAll(visitedFromThisStartNode); } } - private static void innerForbidCircularReferences(Map bank, String patternName, List path, String pattern) { - if (patternReferencesItself(pattern, patternName)) { - String message; - if (path.isEmpty()) { - message = "circular reference in pattern [" + patternName + "][" + pattern + "]"; - } else { - message = "circular reference in pattern [" - + path.remove(path.size() - 1) - + "][" - + pattern - + "] back to pattern [" - + patternName - + "]"; - // add rest of the path: - if (path.isEmpty() == false) { - message += " via patterns [" + String.join("=>", path) + "]"; - } + private static int findFirstNonNull(String[] level) { + for (int i = 0; i < level.length; i++) { + if (level[i] != null) { + return i; } - throw new IllegalArgumentException(message); } + return -1; + } - // next check any other pattern names found in the pattern + /** + * This method returns the array of pattern names (if any) found in the bank for the pattern named patternName. If no pattern names + * are found, an empty array is returned. If any of the pattern names to be returned does not exist in the bank, an exception + * is thrown.
+ */ + private static String[] getPatternNamesForPattern(Map bank, String patternName) { + String pattern = bank.get(patternName); + List patternReferences = new ArrayList<>(); for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { int begin = i + 2; int bracketIndex = pattern.indexOf('}', begin); @@ -112,25 +167,22 @@ private static void innerForbidCircularReferences(Map bank, Stri end = bracketIndex; } else if (columnIndex != -1 && bracketIndex == -1) { end = columnIndex; - } else if (bracketIndex != -1 && columnIndex != -1) { + } else if (bracketIndex != -1) { end = Math.min(bracketIndex, columnIndex); } else { throw new IllegalArgumentException("pattern [" + pattern + "] has an invalid syntax"); } String otherPatternName = pattern.substring(begin, end); - path.add(otherPatternName); - String otherPattern = bank.get(otherPatternName); - if (otherPattern == null) { - throw new IllegalArgumentException( - "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" - ); + if (patternReferences.contains(otherPatternName) == false) { + patternReferences.add(otherPatternName); + String otherPattern = bank.get(otherPatternName); + if (otherPattern == null) { + throw new IllegalArgumentException( + "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" + ); + } } - - innerForbidCircularReferences(bank, patternName, path, otherPattern); } - } - - private static boolean patternReferencesItself(String pattern, String patternName) { - return pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":"); + return patternReferences.toArray(new String[0]); } } diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java index dcc7ab431611a..08a4965cdb371 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java @@ -11,8 +11,13 @@ import org.elasticsearch.test.ESTestCase; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.Map; -import java.util.TreeMap; +import java.util.Set; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.hamcrest.Matchers.containsString; public class PatternBankTests extends ESTestCase { @@ -32,7 +37,7 @@ public void testBankCannotBeNull() { public void testConstructorValidatesCircularReferences() { var e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); } public void testExtendWith() { @@ -48,36 +53,36 @@ public void testExtendWith() { public void testCircularReference() { var e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = 
expectThrows( IllegalArgumentException.class, () -> { PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name:int}!!!")); } ); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name:int}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME2][!!!%{NAME1}!!!] back to pattern [NAME1]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME4}!!!"); @@ -85,10 +90,78 @@ public void testCircularReference() { bank.put("NAME5", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals( - "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2=>NAME3=>NAME4]", - e.getMessage() - ); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME4->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!%{NAME2}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME2->NAME3->NAME2", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME2}!!%{NAME3}!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME4->NAME5->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", 
"!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME3->NAME1", e.getMessage()); } public void testCircularSelfReference() { @@ -96,7 +169,7 @@ public void testCircularSelfReference() { IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("ANOTHER", "%{INT}", "INT", "%{INT}")) ); - assertEquals("circular reference in pattern [INT][%{INT}]", e.getMessage()); + assertEquals("circular reference detected: INT->INT", e.getMessage()); } public void testInvalidPatternReferences() { @@ -112,4 +185,80 @@ public void testInvalidPatternReferences() { ); assertEquals("pattern [%{VALID] has an invalid syntax", e.getMessage()); } + + public void testDeepGraphOfPatterns() { + Map patternBankMap = randomBoolean() ? new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 20_000; + for (int i = 0; i < nodeCount - 1; i++) { + patternBankMap.put("FOO" + i, "%{FOO" + (i + 1) + "}"); + } + patternBankMap.put("FOO" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithoutCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. But there are no cycles in any of + * these since each pattern only references patterns with a higher ID. We don't expect any exceptions here. + */ + Map patternBankMap = randomBoolean() ? new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 500; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + for (int j = 0; j < randomIntBetween(0, 20); j++) { + patternBuilder.append("%{FOO-" + randomIntBetween(i + 1, nodeCount - 1) + "}"); + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. We have at least one cycle because + * we pick a node at random, and make sure that a node that it links (or one of its descendants) to links back. If no descendant + * links back to it, we create an artificial cycle at the end. 
+ */ + Map patternBankMap = new LinkedHashMap<>(); + final int nodeCount = 500; + int nodeToHaveCycle = randomIntBetween(0, nodeCount); + int nodeToPotentiallyCreateCycle = -1; + boolean haveCreatedCycle = false; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + int numberOfLinkedPatterns = randomIntBetween(1, 20); + int nodeToLinkBackIndex = randomIntBetween(0, numberOfLinkedPatterns); + Set childNodes = new HashSet<>(); + for (int j = 0; j < numberOfLinkedPatterns; j++) { + int childNode = randomIntBetween(i + 1, nodeCount - 1); + childNodes.add(childNode); + patternBuilder.append("%{FOO-" + childNode + "}"); + if (i == nodeToHaveCycle) { + if (nodeToLinkBackIndex == j) { + nodeToPotentiallyCreateCycle = childNode; + } + } + } + if (i == nodeToPotentiallyCreateCycle) { + // We either create the cycle here, or randomly pick a child node to maybe create the cycle + if (randomBoolean()) { + patternBuilder.append("%{FOO-" + nodeToHaveCycle + "}"); + haveCreatedCycle = true; + } else { + nodeToPotentiallyCreateCycle = randomFrom(childNodes); + } + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + if (haveCreatedCycle) { + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + } else { + // We didn't randomly create a cycle, so just force one in this last pattern + nodeToHaveCycle = nodeCount - 1; + patternBankMap.put("FOO-" + nodeToHaveCycle, "%{FOO-" + nodeToHaveCycle + "}"); + } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(patternBankMap)); + assertThat(e.getMessage(), containsString("FOO-" + nodeToHaveCycle)); + } } From 74d964b9b16b7c8837ec0869b15e2c2bdee416cd Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 26 Aug 2024 15:01:22 -0400 Subject: [PATCH 211/389] ESQL: Fix a bug in `MV_PERCENTILE` (#112218) This fixes a bug in `MV_PERCENTILE` that was producing incorrect results when the `Block` was in ascending order. We were always reading from the start of the block rather than from the first value of the position being processed.
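Concretely, a minimal, self-contained sketch of the indexing mistake (illustrative names and a hand-rolled flat array, not the ESQL `Block` API; the values are the ones used in the new tests below):

    public class MvPercentileBugSketch {
        public static void main(String[] args) {
            // Position 0 holds {80, 90}; position 1 holds {-6.33, -3.34, -0.31, 6.23}.
            double[] values = { 80, 90, -6.33, -3.34, -0.31, 6.23 };
            int firstValueIndex = 2; // position 1 starts here
            // p75 over 4 values interpolates between offsets 2 and 3 with fraction 0.25.
            int lowerIndex = 2, upperIndex = 3;
            double fraction = 0.25;
            // Buggy: offsets taken relative to the whole block, so position 1 reads the wrong slots.
            double buggy = values[lowerIndex] + fraction * (values[upperIndex] - values[lowerIndex]);
            // Fixed: offsets taken relative to the position's first value.
            double fixed = values[firstValueIndex + lowerIndex]
                + fraction * (values[firstValueIndex + upperIndex] - values[firstValueIndex + lowerIndex]);
            System.out.println(buggy); // -5.5825 (interpolates over the wrong data)
            System.out.println(fixed); // 1.325, the value the new test expects
        }
    }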
Closes #112188 Closes #112187 Closes #112193 Closes #112180 --- docs/changelog/112218.yaml | 9 + muted-tests.yml | 12 -- .../scalar/multivalue/MvPercentile.java | 22 ++- .../multivalue/MvPercentileSimpleTests.java | 154 ++++++++++++++++++ .../scalar/multivalue/MvPercentileTests.java | 17 +- 5 files changed, 194 insertions(+), 20 deletions(-) create mode 100644 docs/changelog/112218.yaml create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml new file mode 100644 index 0000000000000..c426dd7ade4ed --- /dev/null +++ b/docs/changelog/112218.yaml @@ -0,0 +1,9 @@ +pr: 112218 +summary: "ESQL: Fix a bug in `MV_PERCENTILE`" +area: ES|QL +type: bug +issues: + - 112193 + - 112180 + - 112187 + - 112188 diff --git a/muted-tests.yml b/muted-tests.yml index 85c29759cabb2..eff599b758a1d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -151,24 +151,12 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112144 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/112147 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {mv_percentile.FromIndex SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112180 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {mv_percentile.FromIndex SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112187 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {mv_percentile.FromIndex ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112188 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} issue: https://github.com/elastic/elasticsearch/issues/112191 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {mv_percentile.FromIndex ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112193 - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAsync issue: https://github.com/elastic/elasticsearch/issues/112212 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java index b1e710b9b2a40..1eb0c70a7b08e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java @@ -233,7 +233,7 @@ static void process( // Percentile calculators - private static double calculateDoublePercentile( + static double calculateDoublePercentile( DoubleBlock valuesBlock, int firstValueIndex, int valueCount, @@ -257,7 +257,11 @@ private static double calculateDoublePercentile( return valuesBlock.getDouble(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculateDoublePercentile(fraction, valuesBlock.getDouble(lowerIndex), valuesBlock.getDouble(upperIndex)); + return 
calculateDoublePercentile( + fraction, + valuesBlock.getDouble(firstValueIndex + lowerIndex), + valuesBlock.getDouble(firstValueIndex + upperIndex) + ); } } @@ -289,7 +293,7 @@ private static double calculateDoublePercentile( return calculateDoublePercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); } - private static int calculateIntPercentile( + static int calculateIntPercentile( IntBlock valuesBlock, int firstValueIndex, int valueCount, @@ -313,8 +317,8 @@ private static int calculateIntPercentile( return valuesBlock.getInt(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - var lowerValue = valuesBlock.getInt(lowerIndex); - var upperValue = valuesBlock.getInt(upperIndex); + var lowerValue = valuesBlock.getInt(firstValueIndex + lowerIndex); + var upperValue = valuesBlock.getInt(firstValueIndex + upperIndex); var difference = (long) upperValue - lowerValue; return lowerValue + (int) (fraction * difference); } @@ -351,7 +355,7 @@ private static int calculateIntPercentile( return lowerValue + (int) (fraction * difference); } - private static long calculateLongPercentile( + static long calculateLongPercentile( LongBlock valuesBlock, int firstValueIndex, int valueCount, @@ -375,7 +379,11 @@ private static long calculateLongPercentile( return valuesBlock.getLong(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculateLongPercentile(fraction, valuesBlock.getLong(lowerIndex), valuesBlock.getLong(upperIndex)); + return calculateLongPercentile( + fraction, + valuesBlock.getLong(firstValueIndex + lowerIndex), + valuesBlock.getLong(firstValueIndex + upperIndex) + ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java new file mode 100644 index 0000000000000..81ae8efb7aba7 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; + +import static org.hamcrest.Matchers.equalTo; + +public class MvPercentileSimpleTests extends ESTestCase { + public void testDoubleMvAsc() { + try (DoubleBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newDoubleBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendDouble(80); + builder.appendDouble(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendDouble(-6.33); + builder.appendDouble(-3.34); + builder.appendDouble(-0.31); + builder.appendDouble(6.23); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (DoubleBlock block = builder.build()) { + MvPercentile.DoubleSortingScratch scratch = new MvPercentile.DoubleSortingScratch(); + double p0 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + double p1 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87.5)); + assertThat(p1, equalTo(1.325)); + } + } + } + + public void testDoubleRandomOrder() { + try (DoubleBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newDoubleBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendDouble(80); + builder.appendDouble(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendDouble(-3.34); + builder.appendDouble(-6.33); + builder.appendDouble(6.23); + builder.appendDouble(-0.31); + builder.endPositionEntry(); + try (DoubleBlock block = builder.build()) { + MvPercentile.DoubleSortingScratch scratch = new MvPercentile.DoubleSortingScratch(); + double p0 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + double p1 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87.5)); + assertThat(p1, equalTo(1.325)); + } + } + } + + public void testIntMvAsc() { + try (IntBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newIntBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendInt(80); + builder.appendInt(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendInt(-6); + builder.appendInt(-3); + builder.appendInt(0); + builder.appendInt(6); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (IntBlock block = builder.build()) { + MvPercentile.IntSortingScratch scratch = new MvPercentile.IntSortingScratch(); + int p0 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + int p1 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87)); + assertThat(p1, equalTo(1)); + } + } + } + + public void testIntRandomOrder() { + try (IntBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newIntBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendInt(80); + builder.appendInt(90); + builder.endPositionEntry(); + 
builder.beginPositionEntry(); + builder.appendInt(-3); + builder.appendInt(-6); + builder.appendInt(6); + builder.appendInt(0); + builder.endPositionEntry(); + try (IntBlock block = builder.build()) { + MvPercentile.IntSortingScratch scratch = new MvPercentile.IntSortingScratch(); + int p0 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + int p1 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87)); + assertThat(p1, equalTo(1)); + } + } + } + + public void testLongMvAsc() { + try (LongBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newLongBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendLong(80); + builder.appendLong(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendLong(-6); + builder.appendLong(-3); + builder.appendLong(0); + builder.appendLong(6); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (LongBlock block = builder.build()) { + MvPercentile.LongSortingScratch scratch = new MvPercentile.LongSortingScratch(); + long p0 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + long p1 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87L)); + assertThat(p1, equalTo(1L)); + } + } + } + + public void testLongRandomOrder() { + try (LongBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newLongBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendLong(80); + builder.appendLong(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendLong(-3); + builder.appendLong(-6); + builder.appendLong(6); + builder.appendLong(0); + builder.endPositionEntry(); + try (LongBlock block = builder.build()) { + MvPercentile.LongSortingScratch scratch = new MvPercentile.LongSortingScratch(); + long p0 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + long p1 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87L)); + assertThat(p1, equalTo(1L)); + } + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java index 3410b95458302..29cc959e6a943 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java @@ -59,7 +59,7 @@ public static Iterable parameters() { } } - for (var percentileType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + for (var percentileType : List.of(INTEGER, LONG, DOUBLE)) { cases.addAll( List.of( // Doubles @@ -334,6 +334,21 @@ public static Iterable parameters() { ); } } + cases.add( + new TestCaseSupplier( + "from example", + List.of(DOUBLE, INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-3.34, -6.33, 6.23, -0.31), DOUBLE, "field"), + new TestCaseSupplier.TypedData(75, INTEGER, "percentile") + ), + 
evaluatorString(DOUBLE, INTEGER), + DOUBLE, + equalTo(1.325) + ) + ) + ); return parameterSuppliersFromTypedDataWithDefaultChecks( (nullPosition, nullValueDataType, original) -> nullValueDataType == DataType.NULL && nullPosition == 0 From 631a63c9ed87bb03dc447a4c0ed528d37e87c24e Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 26 Aug 2024 15:44:23 -0400 Subject: [PATCH 212/389] [CI] Add lucene snapshot pipeline schedules for lucene_snapshot_10 branch (#112215) --- .../scripts/lucene-snapshot/update-branch.sh | 10 +++++----- .../lucene-snapshot/update-es-snapshot.sh | 4 ++-- catalog-info.yaml | 18 +++++++++++++++--- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.buildkite/scripts/lucene-snapshot/update-branch.sh b/.buildkite/scripts/lucene-snapshot/update-branch.sh index d02123f3236e7..6a2d1e3df05f7 100755 --- a/.buildkite/scripts/lucene-snapshot/update-branch.sh +++ b/.buildkite/scripts/lucene-snapshot/update-branch.sh @@ -2,17 +2,17 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on lucene_snapshot branches" exit 1 fi -echo --- Updating lucene_snapshot branch with main +echo --- Updating "$BUILDKITE_BRANCH" branch with main git config --global user.name elasticsearchmachine git config --global user.email 'infra-root+elasticsearchmachine@elastic.co' -git checkout lucene_snapshot +git checkout "$BUILDKITE_BRANCH" git fetch origin main git merge --no-edit origin/main -git push origin lucene_snapshot +git push origin "$BUILDKITE_BRANCH" diff --git a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh index 75f42a32cb590..7bec83d055139 100755 --- a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh +++ b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh @@ -2,8 +2,8 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on the lucene_snapshot branches" exit 1 fi diff --git a/catalog-info.yaml b/catalog-info.yaml index dfeeae51c1b3a..e57841c9de268 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -125,7 +125,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -142,6 +142,10 @@ spec: branch: lucene_snapshot cronline: "0 2 * * * America/New_York" message: "Builds a new lucene snapshot 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 2 * * * America/New_York" + message: "Builds a new lucene snapshot 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -169,7 +173,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot 
teams: elasticsearch-team: {} @@ -186,6 +190,10 @@ spec: branch: lucene_snapshot cronline: "0 6 * * * America/New_York" message: "Merges main into lucene_snapshot branch 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 6 * * * America/New_York" + message: "Merges main into lucene_snapshot_10 branch 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -213,7 +221,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -230,6 +238,10 @@ spec: branch: lucene_snapshot cronline: "0 9,12,15,18 * * * America/New_York" message: "Runs tests against lucene_snapshot branch several times per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 9,12,15,18 * * * America/New_York" + message: "Runs tests against lucene_snapshot_10 branch several times per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 From ef95cdd4cce3cdb3c788dd6c2de122dcc7f82d4a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 26 Aug 2024 18:51:12 -0700 Subject: [PATCH 213/389] Fix native library loading zstd with jna (#112221) Recent refactoring of native library paths broke jna loading zstd. This commit fixes the JNA provider to set jna.library.path during initialization so that jna calls to load libraries still work.
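For context, JNA resolves `Native.load(...)` calls against the `jna.library.path` system property, so it is enough to set that property before the first load attempt. A minimal sketch of the pattern, with a placeholder directory (the real code below uses `LoaderHelper.platformLibDir`):

    import java.nio.file.Path;

    public class JnaPathSketch {
        // Mirrors JnaNativeLibraryProvider below: set the property in a static
        // initializer so it is in place before any native library is loaded.
        static {
            Path platformLibDir = Path.of("lib", "platform"); // placeholder, not the real lookup
            System.setProperty("jna.library.path", platformLibDir.toString());
        }

        public static void main(String[] args) {
            System.out.println(System.getProperty("jna.library.path"));
        }
    }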
--- .../nativeaccess/jna/JnaNativeLibraryProvider.java | 11 +++++++++++ .../elasticsearch/nativeaccess/lib/LoaderHelper.java | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 79caf04c97246..e0233187425ea 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -8,9 +8,11 @@ package org.elasticsearch.nativeaccess.jna; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; @@ -23,6 +25,10 @@ public class JnaNativeLibraryProvider extends NativeLibraryProvider { + static { + setJnaLibraryPath(); + } + public JnaNativeLibraryProvider() { super( "jna", @@ -45,6 +51,11 @@ public JnaNativeLibraryProvider() { ); } + @SuppressForbidden(reason = "jna library path must be set for load library to work with our own libs") + private static void setJnaLibraryPath() { + System.setProperty("jna.library.path", LoaderHelper.platformLibDir.toString()); + } + private static Supplier notImplemented() { return () -> { throw new AssertionError(); }; } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java index 4da52c415c040..42ca60b81a027 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -16,7 +16,7 @@ * A utility for loading libraries from Elasticsearch's platform specific lib dir. */ public class LoaderHelper { - private static final Path platformLibDir = findPlatformLibDir(); + public static final Path platformLibDir = findPlatformLibDir(); private static Path findPlatformLibDir() { // tests don't have an ES install, so the platform dir must be passed in explicitly From 535e9edced9995e8411b46622e29f8ae006ab4f1 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 27 Aug 2024 06:38:11 +0400 Subject: [PATCH 214/389] Add ingest-geoip module to rest-resources-zip (#112216) --- modules/ingest-geoip/build.gradle | 4 ++++ x-pack/rest-resources-zip/build.gradle | 1 + 2 files changed, 5 insertions(+) diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 5bdb6da5c7b29..bc5bb165cd0d2 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -88,3 +88,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. 
In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") } + +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index cc5bddf12d801..0133ff80dfadf 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -21,6 +21,7 @@ dependencies { freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') freeTests project(path: ':modules:analysis-common', configuration: 'restTests') + freeTests project(path: ':modules:ingest-geoip', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') From d14fe7733b2ce361e08c05624668fddbf2763a86 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 27 Aug 2024 17:03:01 +1000 Subject: [PATCH 215/389] Expand RecordingInstruments to support collection of observers (#112195) The support is needed for RecordingInstruments to be used in tests for gauges with a collection of observers. Relates: #110630 --- .../telemetry/RecordingInstruments.java | 29 ++++++++----- .../telemetry/RecordingMeterRegistry.java | 42 +++++++++++-------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java index 35417c16e7e1c..49e667bb74e5b 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java @@ -24,6 +25,7 @@ import org.elasticsearch.telemetry.metric.LongUpDownCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -53,7 +54,7 @@ public String getName() { } } - protected interface NumberWithAttributesObserver extends Supplier>> { } + protected interface NumberWithAttributesObserver extends Supplier>>> { } @@ -74,7 +75,7 @@ public void run() { return; } var observation = observer.get(); - call(observation.v1(), observation.v2()); + observation.forEach(o -> call(o.v1(), o.v2())); } } @@ -109,10 +110,10 @@ public void incrementBy(double inc, Map attributes) { } public static class RecordingDoubleGauge extends CallbackRecordingInstrument implements DoubleGauge { - public RecordingDoubleGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingDoubleGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } @@ -172,10 +173,14 @@ public void incrementBy(long inc, Map attributes) { public static class RecordingAsyncLongCounter extends CallbackRecordingInstrument implements LongAsyncCounter { - public RecordingAsyncLongCounter(String name, Supplier observer, MetricRecorder recorder) { +
public RecordingAsyncLongCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -183,10 +188,14 @@ public RecordingAsyncLongCounter(String name, Supplier obser public static class RecordingAsyncDoubleCounter extends CallbackRecordingInstrument implements DoubleAsyncCounter { - public RecordingAsyncDoubleCounter(String name, Supplier observer, MetricRecorder recorder) { + public RecordingAsyncDoubleCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -194,10 +203,10 @@ public RecordingAsyncDoubleCounter(String name, Supplier o public static class RecordingLongGauge extends CallbackRecordingInstrument implements LongGauge { - public RecordingLongGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingLongGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java index 97fe0ad1370ef..392445aa77a8f 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java @@ -24,6 +24,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.Collection; +import java.util.Collections; import java.util.function.Supplier; /** @@ -72,9 +73,7 @@ protected DoubleUpDownCounter buildDoubleUpDownCounter(String name, String descr @Override public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { - DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -84,7 +83,9 @@ public DoubleGauge registerDoublesGauge( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -92,7 +93,12 @@ public DoubleGauge getDoubleGauge(String name) { return (DoubleGauge) recorder.getInstrument(InstrumentType.DOUBLE_GAUGE, name); } - protected DoubleGauge buildDoubleGauge(String name, String description, String unit, Supplier observer) { + protected DoubleGauge buildDoubleGauge( + String name, + String description, + String unit, + Supplier> observer + ) { return new RecordingInstruments.RecordingDoubleGauge(name, observer, recorder); } @@ -121,9 +127,7 @@ public 
LongCounter registerLongCounter(String name, String description, String u @Override public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { - LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerLongsAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -133,7 +137,9 @@ public LongAsyncCounter registerLongsAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -148,9 +154,7 @@ public DoubleAsyncCounter registerDoubleAsyncCounter( String unit, Supplier observer ) { - DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -160,7 +164,9 @@ public DoubleAsyncCounter registerDoublesAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -196,14 +202,14 @@ protected LongUpDownCounter buildLongUpDownCounter(String name, String descripti @Override public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { - LongGauge instrument = buildLongGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerLongsGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override public LongGauge registerLongsGauge(String name, String description, String unit, Supplier> observer) { - throw new UnsupportedOperationException("not implemented"); + LongGauge instrument = buildLongGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -211,7 +217,7 @@ public LongGauge getLongGauge(String name) { return (LongGauge) recorder.getInstrument(InstrumentType.LONG_GAUGE, name); } - protected LongGauge buildLongGauge(String name, String description, String unit, Supplier observer) { + protected LongGauge buildLongGauge(String name, String description, String unit, Supplier> observer) { return new RecordingInstruments.RecordingLongGauge(name, observer, recorder); } From 303b2274766595c2bbbd2b339345cfa6b6a2009e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:05:46 +0100 Subject: [PATCH 216/389] Add link to warning re. single-node clusters (#112114) Expands the message added in #88013 to include a link to the relevant docs. 
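As a stripped-down analogue of the mechanism (not the real `ReferenceDocs` class): the enum constant's string form renders as a docs URL, so it can simply be passed as an extra format argument, which is what the `Coordinator` change below does. The page fragment here is taken from the `reference-docs-links.json` change below; the base URL is an assumption:

    enum DocsLinkSketch {
        FORMING_SINGLE_NODE_CLUSTERS("modules-discovery-bootstrap-cluster.html#modules-discovery-bootstrap-cluster-joining");

        private final String page;

        DocsLinkSketch(String page) {
            this.page = page;
        }

        @Override
        public String toString() {
            // assumed base URL; the real class may derive it from the release version
            return "https://www.elastic.co/guide/en/elasticsearch/reference/current/" + page;
        }

        public static void main(String[] args) {
            System.out.printf("Remove the discovery configuration to suppress this message. See [%s] for more information.%n",
                FORMING_SINGLE_NODE_CLUSTERS);
        }
    }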
--- .../cluster/coordination/Coordinator.java | 7 +++++-- .../java/org/elasticsearch/common/ReferenceDocs.java | 1 + .../elasticsearch/common/reference-docs-links.json | 3 ++- .../cluster/coordination/CoordinatorTests.java | 11 ++++++++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 437219b312045..e922d130d7f83 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -831,10 +832,12 @@ public void run() { discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \ not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \ cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \ - the node's data path(s). Remove the discovery configuration to suppress this message.""", + the node's data path(s). Remove the discovery configuration to suppress this message. See [{}] for \ + more information.""", applierState.metadata().clusterUUID(), DISCOVERY_SEED_HOSTS_SETTING.getKey(), - DISCOVERY_SEED_HOSTS_SETTING.get(settings) + DISCOVERY_SEED_HOSTS_SETTING.get(settings), + ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS ); } } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index f710ae7c3b84a..59c55fb7b624a 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -81,6 +81,7 @@ public enum ReferenceDocs { MAX_SHARDS_PER_NODE, FLOOD_STAGE_WATERMARK, X_OPAQUE_ID, + FORMING_SINGLE_NODE_CLUSTERS, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index 8288ca792b0f1..3eb8939c22a65 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -41,5 +41,6 @@ "LUCENE_MAX_DOCS_LIMIT": "size-your-shards.html#troubleshooting-max-docs-limit", "MAX_SHARDS_PER_NODE": "size-your-shards.html#troubleshooting-max-shards-open", "FLOOD_STAGE_WATERMARK": "fix-watermark-errors.html", - "X_OPAQUE_ID": "api-conventions.html#x-opaque-id" + "X_OPAQUE_ID": "api-conventions.html#x-opaque-id", + "FORMING_SINGLE_NODE_CLUSTERS": "modules-discovery-bootstrap-cluster.html#modules-discovery-bootstrap-cluster-joining" } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b57badb3a180f..bf64b29d364e0 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -79,6 +80,8 @@ import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -1762,7 +1765,13 @@ public void testLogsWarningPeriodicallyIfSingleNodeClusterHasSeedHosts() { @Override public void match(LogEvent event) { final String message = event.getMessage().getFormattedMessage(); - assertThat(message, startsWith("This node is a fully-formed single-node cluster with cluster UUID")); + assertThat( + message, + allOf( + startsWith("This node is a fully-formed single-node cluster with cluster UUID"), + containsString(ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS.toString()) + ) + ); loggedClusterUuid = (String) event.getMessage().getParameters()[0]; } From ec90d2c1239bf848914dc4411c676a1f05f2777a Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:05 +0100 Subject: [PATCH 217/389] Reduce nesting in restore-snapshot path (#112107) Also cleans up the exception-handling a little to ensure that all failures are logged. 
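The shape of the refactoring, reduced to two steps (the step bodies here are invented; only `SubscribableListener` methods that appear in the diff below are used). Each step subscribes to the previous one, and the single `addListener` at the end is the one place that observes a failure from any step, which is what makes the "all failures are logged" guarantee easy to audit:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.SubscribableListener;

    public class RestoreChainSketch {
        static void chained(ActionListener<String> listener) {
            SubscribableListener
                .<Void>newForked(l -> l.onResponse(null))       // step 1, e.g. refresh repository UUIDs
                .<String>andThen(l -> l.onResponse("snapshot")) // step 2 runs only if step 1 succeeded
                .addListener(listener.delegateResponse((delegate, e) -> {
                    // single choke point: log and propagate failures from any step
                    delegate.onFailure(e);
                }));
        }
    }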
--- .../snapshots/RestoreService.java | 114 +++++++++--------- 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0f03cfab4ad2e..d8987495f9035 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; @@ -56,7 +57,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; @@ -92,9 +92,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -248,62 +248,66 @@ public void restoreSnapshot( final BiConsumer updater ) { assert Repository.assertSnapshotMetaThread(); - try { - // Try and fill in any missing repository UUIDs in case they're needed during the restore - final var repositoryUuidRefreshStep = new ListenableFuture(); - refreshRepositoryUuids( - refreshRepositoryUuidOnRestore, - repositoriesService, - () -> repositoryUuidRefreshStep.onResponse(null), - snapshotMetaExecutor - ); - // Read snapshot info and metadata from the repository - final String repositoryName = request.repository(); - Repository repository = repositoriesService.repository(repositoryName); - final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - repository.getRepositoryData(snapshotMetaExecutor, repositoryDataListener); - - repositoryDataListener.addListener( - listener.delegateFailureAndWrap( - (delegate, repositoryData) -> repositoryUuidRefreshStep.addListener( - delegate.delegateFailureAndWrap((subDelegate, ignored) -> { - assert Repository.assertSnapshotMetaThread(); - final String snapshotName = request.snapshot(); - final Optional matchingSnapshotId = repositoryData.getSnapshotIds() - .stream() - .filter(s -> snapshotName.equals(s.getName())) - .findFirst(); - if (matchingSnapshotId.isPresent() == false) { - throw new SnapshotRestoreException(repositoryName, snapshotName, "snapshot does not exist"); - } + // Try and fill in any missing repository UUIDs in case they're needed during the restore + final var repositoryUuidRefreshStep = SubscribableListener.newForked( + l -> refreshRepositoryUuids(refreshRepositoryUuidOnRestore, repositoriesService, () -> l.onResponse(null), snapshotMetaExecutor) + ); - final SnapshotId snapshotId = matchingSnapshotId.get(); - if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { - throw new SnapshotRestoreException( - repositoryName, - snapshotName, - "snapshot 
UUID mismatch: expected [" - + request.snapshotUuid() - + "] but got [" - + snapshotId.getUUID() - + "]" - ); - } - repository.getSnapshotInfo( - snapshotId, - subDelegate.delegateFailureAndWrap( - (l, snapshotInfo) -> startRestore(snapshotInfo, repository, request, repositoryData, updater, l) - ) - ); - }) - ) + // AtomicReference just so we have somewhere to hold these objects, there's no interesting concurrency here + final AtomicReference repositoryRef = new AtomicReference<>(); + final AtomicReference repositoryDataRef = new AtomicReference<>(); + + SubscribableListener + + .newForked(repositorySetListener -> { + // do this within newForked for exception handling + repositoryRef.set(repositoriesService.repository(request.repository())); + repositorySetListener.onResponse(null); + }) + + .andThen( + repositoryDataListener -> repositoryRef.get().getRepositoryData(snapshotMetaExecutor, repositoryDataListener) + ) + .andThenAccept(repositoryDataRef::set) + .andThen(repositoryUuidRefreshStep::addListener) + + .andThen(snapshotInfoListener -> { + assert Repository.assertSnapshotMetaThread(); + final String snapshotName = request.snapshot(); + final SnapshotId snapshotId = repositoryDataRef.get() + .getSnapshotIds() + .stream() + .filter(s -> snapshotName.equals(s.getName())) + .findFirst() + .orElseThrow(() -> new SnapshotRestoreException(request.repository(), snapshotName, "snapshot does not exist")); + + if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { + throw new SnapshotRestoreException( + request.repository(), + snapshotName, + "snapshot UUID mismatch: expected [" + request.snapshotUuid() + "] but got [" + snapshotId.getUUID() + "]" + ); + } + + repositoryRef.get().getSnapshotInfo(snapshotId, snapshotInfoListener); + }) + + .andThen( + (responseListener, snapshotInfo) -> startRestore( + snapshotInfo, + repositoryRef.get(), + request, + repositoryDataRef.get(), + updater, + responseListener ) - ); - } catch (Exception e) { - logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); - listener.onFailure(e); - } + ) + + .addListener(listener.delegateResponse((delegate, e) -> { + logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); + delegate.onFailure(e); + })); } /** From bff45aaa8a2d53d3de44c66a2c692664fa3b3d46 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:20 +0100 Subject: [PATCH 218/389] Reduce `CompletableFuture` usage in tests (#111848) Fixes some spots in tests where we use `CompletableFuture` instead of one of the preferred alternatives. 
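The substitution itself, in isolation (a sketch using the classes visible in the diff below; `PlainActionFuture` implements `ActionListener`, so production code can complete it directly and tests can block on it without `CompletableFuture`'s checked-exception ceremony):

    import org.elasticsearch.action.support.PlainActionFuture;

    public class FutureSwapSketch {
        public static void main(String[] args) {
            PlainActionFuture<String> future = new PlainActionFuture<>();
            future.onResponse("done");          // replaces CompletableFuture.complete(...)
            String result = future.actionGet(); // replaces future.get(...); tests use safeGet(future)
            System.out.println(result);
        }
    }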
--- .../grok/MatcherWatchdogTests.java | 9 +- .../action/bulk/BulkOperationTests.java | 136 +++++------------- .../ingest/ConditionalProcessorTests.java | 8 +- .../ingest/PipelineProcessorTests.java | 10 +- .../security/authc/ApiKeyServiceTests.java | 16 +-- 5 files changed, 53 insertions(+), 126 deletions(-) diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java index b66778743aec0..5ed1a7d13b80a 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java @@ -7,12 +7,12 @@ */ package org.elasticsearch.grok; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.test.ESTestCase; import org.joni.Matcher; import org.mockito.Mockito; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -77,16 +77,17 @@ public void testIdleIfNothingRegistered() throws Exception { ); // Periodic action is not scheduled because no thread is registered verifyNoMoreInteractions(threadPool); - CompletableFuture<Runnable> commandFuture = new CompletableFuture<>(); + + PlainActionFuture<Runnable> commandFuture = new PlainActionFuture<>(); // Periodic action is scheduled because a thread is registered doAnswer(invocationOnMock -> { - commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]); + commandFuture.onResponse(invocationOnMock.getArgument(0)); return null; }).when(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)); Matcher matcher = mock(Matcher.class); watchdog.register(matcher); // Registering the first thread should have caused the command to get scheduled again - Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS); + Runnable command = safeGet(commandFuture); Mockito.reset(threadPool); watchdog.unregister(matcher); command.run(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index e950901a538b4..0c0e1de74a3e7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterName; @@ -60,9 +61,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -201,9 +200,6 @@ public void tearDownThreadpool() { public void testClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Not retryable ClusterState state
= ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK).build()) @@ -215,9 +211,10 @@ public void testClusterBlockedFailsBulk() { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); } /** @@ -226,9 +223,6 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -248,9 +242,11 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); verify(observer, times(2)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -261,9 +257,6 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -278,9 +271,10 @@ public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -296,12 +290,7 @@ public void testBulkToIndex() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -318,12
+307,7 @@ public void testBulkToIndexFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(indexMetadata.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) .findFirst() @@ -344,12 +328,7 @@ public void testBulkToDataStream() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -366,12 +345,7 @@ public void testBulkToDataStreamFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds1BackingIndex2.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -396,12 +370,7 @@ public void testFailingEntireShardRedirectsToFailureStore() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -426,12 +395,7 @@ public void testFailingDocumentRedirectsToFailureStore() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("test"))) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem =
Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -465,12 +429,7 @@ public void testFailureStoreShardFailureRejectsDocument() throws Exception { ) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -500,16 +459,12 @@ public void testFailedDocumentCanNotBeConvertedFails() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("root cause"))) ); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Mock a failure store document converter that always fails FailureStoreDocumentConverter mockConverter = mock(FailureStoreDocumentConverter.class); when(mockConverter.transformFailedRequest(any(), any(), any(), any())).thenThrow(new IOException("Could not serialize json")); - newBulkOperation(client, bulkRequest, mockConverter, listener).run(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, mockConverter, l).run()); - BulkResponse bulkItemResponses = future.get(); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -579,13 +534,10 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { return null; }).when(observer).waitForNextChange(any()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.notifyOnce( - ActionListener.wrap(future::complete, future::completeExceptionally) + final SubscribableListener<BulkResponse> responseListener = SubscribableListener.newForked( + l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run() ); - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - // The operation will attempt to write the documents in the request, receive a failure, wait for a stable cluster state, and then // redirect the failed documents to the failure store.
Wait for that failure store write to start: if (readyToPerformFailureStoreWrite.await(30, TimeUnit.SECONDS) == false) { @@ -595,7 +547,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { } // Check to make sure there is no response yet - if (future.isDone()) { + if (responseListener.isDone()) { // we're going to fail the test, but be a good citizen and unblock the other thread first beginFailureStoreWrite.countDown(); fail("bulk operation completed prematurely"); @@ -605,7 +557,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { beginFailureStoreWrite.countDown(); // Await final result and verify - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(responseListener); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -650,12 +602,7 @@ public void testBlockedClusterRejectsFailureStoreDocument() throws Exception { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait on non retryable block")).when(observer).waitForNextChange(any()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -715,12 +662,7 @@ public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception { return null; }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -775,12 +717,10 @@ public void testNodeClosureRejectsFailureStoreDocument() { return null; }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); @@ -832,12 +772,7 @@ public void testLazilyRollingOverFailureStore() throws Exception { ClusterState rolledOverState =
ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds3FailureStore2.getIndex().getName())) .findFirst() @@ -880,12 +815,7 @@ public void testFailureWhileRollingOverFailureStore() throws Exception { ClusterState rolledOverState = ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture<BulkResponse> future = new CompletableFuture<>(); - ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) .findFirst() diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java index 3a6de10b5901d..546b252615b28 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.IngestConditionalScript; import org.elasticsearch.script.MockScriptEngine; @@ -25,7 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -242,14 +242,14 @@ public boolean execute(Map<String, Object> ctx) { private static void assertMutatingCtxThrows(Consumer<Map<String, Object>> mutation) throws Exception { String scriptName = "conditionalScript"; - CompletableFuture<Exception> expectedException = new CompletableFuture<>(); + PlainActionFuture<Exception> expectedException = new PlainActionFuture<>(); ScriptService scriptService = new ScriptService( Settings.builder().build(), Map.of(Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Map.of(scriptName, ctx -> { try { mutation.accept(ctx); } catch (Exception e) { - expectedException.complete(e); + expectedException.onResponse(e); } return false; }), Map.of())), @@ -267,7 +267,7 @@ private static void assertMutatingCtxThrows(Consumer<Map<String, Object>> mutati IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); ingestDocument.setFieldValue("listField", new ArrayList<>()); execProcessor(processor, ingestDocument, (result, e) -> {}); - Exception e = expectedException.get(); + Exception e = safeGet(expectedException); assertThat(e,
instanceOf(UnsupportedOperationException.class)); assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); assertStats(processor, 0, 0, 0); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java index cfbdbc3792082..d9058e83acfe0 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; import org.elasticsearch.test.ESTestCase; @@ -16,7 +17,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; @@ -32,12 +32,12 @@ public class PipelineProcessorTests extends ESTestCase { public void testExecutesPipeline() throws Exception { String pipelineId = "pipeline"; IngestService ingestService = createIngestService(); - CompletableFuture<IngestDocument> invoked = new CompletableFuture<>(); + PlainActionFuture<IngestDocument> invoked = new PlainActionFuture<>(); IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); Pipeline pipeline = new Pipeline(pipelineId, null, null, null, new CompoundProcessor(new Processor() { @Override - public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { - invoked.complete(ingestDocument); + public IngestDocument execute(final IngestDocument ingestDocument) { + invoked.onResponse(ingestDocument); return ingestDocument; } @@ -61,7 +61,7 @@ public String getDescription() { Map<String, Object> config = new HashMap<>(); config.put("name", pipelineId); factory.create(Map.of(), null, null, config).execute(testIngestDocument, (result, e) -> {}); - assertIngestDocument(testIngestDocument, invoked.get()); + assertIngestDocument(testIngestDocument, safeGet(invoked)); } public void testThrowsOnMissingPipeline() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index f4d75434b92de..fa6eb307933ec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -145,7 +145,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -3442,15 +3441,12 @@ public static Authentication createApiKeyAuthentication( Authentication.newApiKeyAuthentication(authenticationResult, "node01"), threadContext ); - final CompletableFuture<Authentication> authFuture = new CompletableFuture<>(); - securityContext.executeAfterRewritingAuthentication((c) -> { - try { - authFuture.complete(authenticationContextSerializer.readFromContext(threadContext)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }, version); - return authFuture.get(); + return safeAwait( + l ->
securityContext.executeAfterRewritingAuthentication( + c -> ActionListener.completeWith(l, () -> authenticationContextSerializer.readFromContext(threadContext)), + version + ) + ); } public static Authentication createApiKeyAuthentication(ApiKeyService apiKeyService, Authentication authentication) From 6d886bc48d71076d37a07faceb1e421b95ec48fd Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Tue, 27 Aug 2024 09:20:59 +0200 Subject: [PATCH 219/389] Add dataset for full text search testing (#112105) --- .../xpack/esql/CsvTestsDataLoader.java | 4 +- .../testFixtures/src/main/resources/books.csv | 80 +++++++++++++++++ .../src/main/resources/mapping-books.json | 30 +++++++ .../main/resources/match-operator.csv-spec | 90 ++++++++++++------- .../src/main/resources/match.csv-spec | 53 ++++++----- 5 files changed, 203 insertions(+), 54 deletions(-) create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index d5e70d264c9be..b20e3bb0d5409 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -100,6 +100,7 @@ public class CsvTestsDataLoader { private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); private static final TestsDataset ADDRESSES = new TestsDataset("addresses", "mapping-addresses.json", "addresses.csv", null, true); + private static final TestsDataset BOOKS = new TestsDataset("books", "mapping-books.json", "books.csv", null, true); public static final Map<String, TestsDataset> CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), @@ -126,7 +127,8 @@ public class CsvTestsDataLoader { Map.entry(DATE_NANOS.indexName, DATE_NANOS), Map.entry(K8S.indexName, K8S), Map.entry(DISTANCES.indexName, DISTANCES), - Map.entry(ADDRESSES.indexName, ADDRESSES) + Map.entry(ADDRESSES.indexName, ADDRESSES), + Map.entry(BOOKS.indexName, BOOKS) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv new file mode 100644 index 0000000000000..1deefaa3c6475 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv @@ -0,0 +1,80 @@ +book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text +2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels.
+7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. +7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. +2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th +4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. +2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. +7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. +7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. 
+4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie +9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. +9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature +1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. +2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi +2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments +8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. 
+8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit +6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. +4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys. +4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint. +2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail. +8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane. 
+6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English. +3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college +5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man. +5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community +6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa +5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS. +3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English! +4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original. +7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages. +3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English. +3677,Planet of Exile,Ursula K. 
Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin. +4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry. +6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way. +3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable. +8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style. +1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion +6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. 
Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators. +2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy. +7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. 
Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts. +6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint. +8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint. +2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society. +1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses). +5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf. 
+4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. +2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 
'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. +7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. +2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Doestoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. +2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. +9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father. 
+5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a priviledged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turien and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of The Leithian by CS Lewis\, who read the poem in 1929. +2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. +7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. +5127,The Overcoat, Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. +8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings\, This title is also available as a film. +4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. 
+4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. +7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. +1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searhes for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. +8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an except from the Brothers Karamazov which stands alone as a statement of philiosophy and a warning about the surrender of freedom for the sake of comfort. +8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time. 
+2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. +3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. +5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end making the book as a whole an important piece of literature. +4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. +9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR(Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. +5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. +8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna.His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed. 
+6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. +8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json new file mode 100644 index 0000000000000..29e3c94c579b1 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json @@ -0,0 +1,30 @@ +{ + "properties": { + "book_no": { + "type": "keyword" + }, + "title": { + "type": "text" + }, + "author": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "description": { + "type": "text" + }, + "publisher": { + "type": "text" + }, + "ratings": { + "type": "float" + }, + "year": { + "type": "integer" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec index 574f27b8c1fed..56eded5ce4603 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec @@ -4,65 +4,89 @@ singleMatchWithTextField required_capability: match_operator -from airports | where name match "london" | keep abbrev, name | sort abbrev; +from books | where author match "William Faulkner" | keep book_no, author | sort book_no | LIMIT 5; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton +book_no:keyword | author:text +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] +2713 | William Faulkner +2847 | Colleen Faulkner +2883 | William Faulkner +3293 | Danny Faulkner ; singleMatchWithKeywordField required_capability: match_operator -from airports | where abbrev match "LTN" | keep abbrev, name | sort abbrev; +from books | where author.keyword match "William Faulkner" | keep book_no, author | sort book_no; -abbrev:keyword | name:text -LTN | London Luton +book_no:keyword | author:text +2713 | William Faulkner +2883 | William Faulkner +4724 | William Faulkner +4977 | William Faulkner +5119 | William Faulkner +5404 | William Faulkner +5578 | William Faulkner +8077 | William Faulkner +9896 | William Faulkner ; multipleMatch required_capability: match_operator -from airports | where name match "london" or name match "liverpool "| keep abbrev, name | sort abbrev; +from 
books +| where (description match "Sauron" OR description match "Dark Lord") AND + (author match "J. R. R. Tolkien" OR author match "John Ronald Reuel Tolkien") +| keep book_no, title, author +| sort book_no +| limit 4 +; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LPL | Liverpool John Lennon -LTN | London Luton +book_no:keyword | title:text | author:text +1463 | Realms of Tolkien: Images of Middle-earth | J. R. R. Tolkien +2675 | The Lord of the Rings - Boxed Set | J.R.R. Tolkien +2714 | Return of the King Being the Third Part of The Lord of the Rings | J. R. R. Tolkien +2936 | Fellowship of the Ring 2ND Edition | John Ronald Reuel Tolkien ; multipleWhereWithMatch required_capability: match_operator -from airports | where name match "john" | WHERE name match "St" | keep abbrev, name | sort abbrev; +from books +| where title match "short stories" +| where author match "Ursula K. Le Guin" +| keep book_no, title, author +| sort book_no +; -abbrev:keyword | name:text -YXJ | Fort St. John (N. Peace) +book_no:keyword | title:text | author:text +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin ; combinedMatchWithFunctions required_capability: match_operator -from airports -| where name match "john" AND country match "Canada" AND scalerank > 5 -| where length(name) > 10 -| keep abbrev, name, country, scalerank -| sort abbrev +from books +| where title match "Tolkien" AND author match "Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| keep book_no, title, author, year +| sort book_no ; -abbrev:keyword | name:text | country:keyword | scalerank: integer -YHM | John C. Munro Hamilton Int'l | Canada | 8 -YXJ | Fort St. John (N. Peace) | Canada | 8 +book_no:keyword | title:text | author:text | year:integer +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 ; matchWithStats required_capability: match_operator -from airports -| where name match "john" AND scalerank > 5 -| where length(name) > 10 -| stats count(*) BY type -| sort type +from books +| where author match "faulkner" AND year > 1990 +| where mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword ; -count(*): long | type:keyword -1 | major -2 | mid +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +8 | William Faulkner ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec index bdc11c78c8f48..2bc2a865c0052 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec @@ -1,34 +1,47 @@ matchKeywordField required_capability: match_command -from employees | match "first_name: Ma*" | keep emp_no, first_name | sort emp_no; - -emp_no:integer | first_name:keyword -10011 |Mary -10020 |Mayuko -10042 |Magy -10054 |Mayumi -10069 |Margareta +from books | match "author.keyword: *Stein*" | keep book_no, author | sort book_no; + +book_no:keyword | author:text +7381 | Bettilu Stein Faulkner ; -matchMultipleKeywordFields +matchMultipleTextFields required_capability: match_command -from employees | match "+first_name: Ma* +last_name:*man" | keep emp_no, first_name, last_name | sort emp_no; +from books | match "title:Return* AND author:*Tolkien" | keep book_no, title | sort book_no; -emp_no:integer | first_name:keyword | last_name:keyword -10069 |Margareta | Bierman +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +7350 | Return of the Shadow ; -matchTextField +matchAllFields required_capability: match_command -from airports | match "lon*" | keep abbrev, name | sort abbrev; +from books | match "dark AND lord AND Sauron" | keep book_no, title | sort book_no; + +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +2936 | Fellowship of the Ring 2ND Edition +; + +matchWithWhereFunctionsAndStats +required_capability: match_command + +from books +| match "Faulkner AND ratings:>4.0" +| where year > 1950 and mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword +; -abbrev:keyword | name:text -CGQ | Changchun Longjia Int'l -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton -LYR | Svalbard Longyear +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +1 | William Faulkner ; From f152839faf082b5f93ecd718d4c297584c545ffe Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 09:11:14 +0100 Subject: [PATCH 220/389] Remove `InterruptedEx.` from snapshot test harness (#112228) Relates #111957 --- .../snapshots/AbstractSnapshotIntegTestCase.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 1b49209b49c7f..1656a09daa123 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ 
-288,7 +288,7 @@ public static void failReadsAllDataNodes(String repository) { } } - public static void waitForBlockOnAnyDataNode(String repository) throws InterruptedException { + public static void waitForBlockOnAnyDataNode(String repository) { final boolean blocked = waitUntil(() -> { for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository); @@ -475,13 +475,13 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li return createSnapshot(repositoryName, snapshot, indices, Collections.singletonList(NO_FEATURE_STATES_VALUE)); } - protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException { + protected void createIndexWithRandomDocs(String indexName, int docCount) { createIndex(indexName); ensureGreen(); indexRandomDocs(indexName, docCount); } - protected void indexRandomDocs(String index, int numdocs) throws InterruptedException { + protected void indexRandomDocs(String index, int numdocs) { logger.info("--> indexing [{}] documents into [{}]", numdocs, index); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { From 9db177887820c2a210aea1c041a88c162754f034 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 27 Aug 2024 09:21:43 +0100 Subject: [PATCH 221/389] Use StreamOutput::writeWriteable instead of writeTo directly (#112027) --- .../lifecycle/action/GetDataStreamLifecycleStatsAction.java | 2 +- .../src/main/java/org/elasticsearch/cluster/ClusterState.java | 2 +- .../elasticsearch/cluster/version/CompatibilityVersions.java | 2 +- .../elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java | 2 +- .../elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java | 2 +- .../org/elasticsearch/xpack/esql/session/Configuration.java | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java index 6e930defd4e0b..71f07c8cac668 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -76,7 +76,7 @@ public Response(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(runDuration); out.writeOptionalVLong(timeBetweenStarts); - out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + out.writeCollection(dataStreamStats, StreamOutput::writeWriteable); } public Long getRunDuration() { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c54269da68507..30e9a9a3779d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1081,7 +1081,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput)); + 
out.writeMap(compatibilityVersions, StreamOutput::writeWriteable); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { clusterFeatures.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java index c1489afc6c369..8ebb24e86105a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -120,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { TransportVersion.writeVersion(this.transportVersion(), out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { - out.writeMap(this.systemIndexMappingsVersion(), (o, v) -> v.writeTo(o)); + out.writeMap(this.systemIndexMappingsVersion(), StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java index c1903a2910629..5a91e997ca5fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java @@ -108,6 +108,6 @@ public Map getUsageStatsByTier() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(usageStatsByTier, (o, v) -> v.writeTo(o)); + out.writeMap(usageStatsByTier, StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index e976a8d9be48e..cec4a5a3509a1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -332,7 +332,7 @@ private static class LookupResponse extends TransportResponse { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, (o, v) -> v.writeTo(o)); + out.writeMap(policies, StreamOutput::writeWriteable); out.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java index a2777c97e919a..33a48d2e7df05 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java @@ -117,7 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(profile); } if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { - out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, (o2, column) -> column.writeTo(o2))); + out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, StreamOutput::writeWriteable)); } } From 25fdcd29276c508e5c69f6d855dc97daed8cfc08 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 27 Aug 2024 11:22:52 +0200 Subject: [PATCH 222/389] ES|QL: cache EsField on serialization (#112008) As a follow-up to https://github.com/elastic/elasticsearch/pull/111447, with this change we also cache 
`EsFields`. This gives us an additional 30-40% reduction in the size of the serialized plan, according to [these tests](https://github.com/elastic/elasticsearch/pull/111980). Related to https://github.com/elastic/elasticsearch/issues/111358 --- .../org/elasticsearch/TransportVersions.java | 1 + .../test/AbstractWireSerializingTestCase.java | 9 ++- .../esql/core/expression/FieldAttribute.java | 4 +- .../xpack/esql/core/type/DateEsField.java | 14 ++--- .../xpack/esql/core/type/EsField.java | 61 +++++++++++++------ .../esql/core/type/InvalidMappedField.java | 21 ++----- .../xpack/esql/core/type/KeywordEsField.java | 17 ++---- .../esql/core}/type/MultiTypeEsField.java | 18 ++---- .../xpack/esql/core/type/TextEsField.java | 13 ++-- .../esql/core/type/UnsupportedEsField.java | 15 ++--- .../xpack/esql/core/util/PlanStreamInput.java | 3 + .../esql/core/util/PlanStreamOutput.java | 10 +++ .../xpack/esql/analysis/Analyzer.java | 2 +- .../esql/enrich/EnrichPolicyResolver.java | 13 ++-- .../esql/enrich/ResolvedEnrichPolicy.java | 2 +- .../function/UnsupportedAttribute.java | 12 +++- .../xpack/esql/index/EsIndex.java | 8 +-- .../xpack/esql/io/stream/PlanStreamInput.java | 49 ++++++++++++++- .../esql/io/stream/PlanStreamOutput.java | 38 ++++++++++++ .../planner/EsPhysicalOperationProviders.java | 2 +- .../xpack/esql/plugin/EsqlPlugin.java | 4 -- .../xpack/esql/SerializationTestUtils.java | 2 - .../AbstractExpressionSerializationTests.java | 2 - .../xpack/esql/expression/AliasTests.java | 2 - .../function/AbstractAttributeTestCase.java | 2 - .../function/FieldAttributeTests.java | 2 +- .../function/UnsupportedAttributeTests.java | 2 +- .../esql/index/EsIndexSerializationTests.java | 32 ++++++---- .../esql/io/stream/PlanNamedTypesTests.java | 2 +- .../esql/io/stream/PlanStreamOutputTests.java | 37 ++++++++++- ...AbstractLogicalPlanSerializationTests.java | 2 - ...bstractPhysicalPlanSerializationTests.java | 2 - .../ExchangeSinkExecSerializationTests.java | 12 ++-- .../esql}/type/AbstractEsFieldTypeTests.java | 42 ++++++++++--- .../esql}/type/DataTypeConversionTests.java | 5 +- .../xpack/esql}/type/DateEsFieldTests.java | 5 +- .../xpack/esql}/type/EsFieldTests.java | 5 +- .../esql}/type/InvalidMappedFieldTests.java | 5 +- .../xpack/esql}/type/KeywordEsFieldTests.java | 4 +- .../esql/type/MultiTypeEsFieldTests.java | 16 ++--- .../xpack/esql}/type/TextEsFieldTests.java | 5 +- .../esql}/type/UnsupportedEsFieldTests.java | 5 +- 42 files changed, 336 insertions(+), 171 deletions(-) rename x-pack/plugin/{esql/src/main/java/org/elasticsearch/xpack/esql => esql-core/src/main/java/org/elasticsearch/xpack/esql/core}/type/MultiTypeEsField.java (86%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/AbstractEsFieldTypeTests.java (57%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DataTypeConversionTests.java (99%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DateEsFieldTests.java (89%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/EsFieldTests.java (91%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/InvalidMappedFieldTests.java (90%) rename
x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/KeywordEsFieldTests.java (92%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/TextEsFieldTests.java (90%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/UnsupportedEsFieldTests.java (91%) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 78f1b21ea7a44..33f483c57b54e 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -197,6 +197,7 @@ static TransportVersion def(int id) { public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); + public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java index 82d7f98f34301..b4503a69acca3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java @@ -25,11 +25,18 @@ public abstract class AbstractWireSerializingTestCase<T extends Writeable> extends AbstractWireTestCase<T> */ protected abstract Writeable.Reader<T> instanceReader(); + /** + * Returns a {@link Writeable.Writer} that will be used to serialize the instance + */ + protected Writeable.Writer<T> instanceWriter() { + return StreamOutput::writeWriteable; + } + /** * Copy the {@link Writeable} by round tripping it through {@linkplain StreamInput} and {@linkplain StreamOutput}.
*/ @Override protected final T copyInstance(T instance, TransportVersion version) throws IOException { - return copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), version); + return copyInstance(instance, getNamedWriteableRegistry(), instanceWriter(), instanceReader(), version); } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index 8e8973a11bc8a..37f2cf863d53e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -112,7 +112,7 @@ private FieldAttribute(StreamInput in) throws IOException { in.readOptionalWriteable(FieldAttribute::readFrom), in.readString(), DataType.readFrom(in), - in.readNamedWriteable(EsField.class), + EsField.readFrom(in), in.readOptionalString(), in.readEnum(Nullability.class), NameId.readFrom((StreamInput & PlanStreamInput) in), @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(parent); out.writeString(name()); dataType().writeTo(out); - out.writeNamedWriteable(field); + field.writeTo(out); // We used to write the qualifier here. We can still do if needed in the future. out.writeOptionalString(null); out.writeEnum(nullable()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java index 01728954a2e1b..f829bcdea94e4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,7 +16,6 @@ * Information about a field in an ES index with the {@code date} type */ public class DateEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "DateEsField", DateEsField::new); public static DateEsField dateEsField(String name, Map<String, EsField> properties, boolean hasDocValues) { return new DateEsField(name, DataType.DATETIME, properties, hasDocValues); @@ -27,19 +25,19 @@ private DateEsField(String name, DataType dataType, Map<String, EsField> properties, boolean hasDocValues) { super(name, dataType, properties, hasDocValues); } - private DateEsField(StreamInput in) throws IOException { - this(in.readString(), DataType.DATETIME, in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean()); + protected DateEsField(StreamInput in) throws IOException { + this(in.readString(), DataType.DATETIME, in.readImmutableMap(EsField::readFrom), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "DateEsField"; } + } diff --git
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java index eb17d720d2140..899986fecd012 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java @@ -7,34 +7,40 @@ package org.elasticsearch.xpack.esql.core.type; import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; -import java.util.List; import java.util.Map; import java.util.Objects; /** * Information about a field in an ES index. */ -public class EsField implements NamedWriteable { - public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { - return List.of( - EsField.ENTRY, - DateEsField.ENTRY, - InvalidMappedField.ENTRY, - KeywordEsField.ENTRY, - TextEsField.ENTRY, - UnsupportedEsField.ENTRY - ); +public class EsField implements Writeable { + + private static Map<String, Reader<? extends EsField>> readers = Map.ofEntries( + Map.entry("EsField", EsField::new), + Map.entry("DateEsField", DateEsField::new), + Map.entry("InvalidMappedField", InvalidMappedField::new), + Map.entry("KeywordEsField", KeywordEsField::new), + Map.entry("MultiTypeEsField", MultiTypeEsField::new), + Map.entry("TextEsField", TextEsField::new), + Map.entry("UnsupportedEsField", UnsupportedEsField::new) + ); + + public static Writeable.Reader<? extends EsField> getReader(String name) { + Reader<? extends EsField> result = readers.get(name); + if (result == null) { + throw new IllegalArgumentException("Invalid EsField type [" + name + "]"); + } + return result; + } - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "EsField", EsField::new); - private final DataType esDataType; private final boolean aggregatable; private final Map<String, EsField> properties; @@ -53,10 +59,10 @@ public EsField(String name, DataType esDataType, Map<String, EsField> properties this.isAlias = isAlias; } - public EsField(StreamInput in) throws IOException { + protected EsField(StreamInput in) throws IOException { this.name = in.readString(); this.esDataType = readDataType(in); - this.properties = in.readImmutableMap(i -> i.readNamedWriteable(EsField.class)); + this.properties = in.readImmutableMap(EsField::readFrom); this.aggregatable = in.readBoolean(); this.isAlias = in.readBoolean(); } @@ -77,18 +83,33 @@ private DataType readDataType(StreamInput in) throws IOException { return DataType.readFrom(name); } + public static <A extends EsField> A readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readEsFieldWithCache(); + } + @Override public void writeTo(StreamOutput out) throws IOException { + if (((PlanStreamOutput) out).writeEsFieldCacheHeader(this)) { + writeContent(out); + } + } + + /** + * This needs to be overridden by subclasses for specific serialization + */ + protected void writeContent(StreamOutput out) throws IOException { out.writeString(name); esDataType.writeTo(out); - out.writeMap(properties, StreamOutput::writeNamedWriteable); + out.writeMap(properties, (o, x) -> x.writeTo(out));
out.writeBoolean(aggregatable); out.writeBoolean(isAlias); } - @Override + /** + * This needs to be overridden by subclasses for specific serialization + */ public String getWriteableName() { - return ENTRY.name; + return "EsField"; } /** diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 8b15893f8a056..d34af0f8565c7 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; @@ -27,11 +26,6 @@ * It is used specifically for the 'union types' feature in ES|QL. */ public class InvalidMappedField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "InvalidMappedField", - InvalidMappedField::new - ); private final String errorMessage; private final Map<String, Set<String>> typesToIndices; @@ -44,10 +38,6 @@ public InvalidMappedField(String name, String errorMessage) { this(name, errorMessage, new TreeMap<>()); } - public InvalidMappedField(String name) { - this(name, StringUtils.EMPTY, new TreeMap<>()); - } - /** * Constructor supporting union types, used in ES|QL. */ @@ -61,8 +51,8 @@ private InvalidMappedField(String name, String errorMessage, Map<String, Set<String>> typesToIndices) { - private InvalidMappedField(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class))); + protected InvalidMappedField(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom)); } public Set<String> types() { @@ -70,15 +60,14 @@ public Set<String> types() { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(errorMessage); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "InvalidMappedField"; } public String errorMessage() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java index d856e3d9d8297..33dcebaf3dec2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -21,11 +20,6 @@ * Information about a field in an ES index with the {@code keyword} type.
*/ public class KeywordEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "KeywordEsField", - KeywordEsField::new - ); private final int precision; private final boolean normalized; @@ -63,11 +57,11 @@ protected KeywordEsField( this.normalized = normalized; } - private KeywordEsField(StreamInput in) throws IOException { + public KeywordEsField(StreamInput in) throws IOException { this( in.readString(), KEYWORD, - in.readMap(i -> i.readNamedWriteable(EsField.class)), + in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readInt(), in.readBoolean(), @@ -76,18 +70,17 @@ private KeywordEsField(StreamInput in) throws IOException { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeInt(precision); out.writeBoolean(normalized); out.writeBoolean(isAlias()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "KeywordEsField"; } public int getPrecision() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java similarity index 86% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java rename to x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java index 8b2fc926379f2..81dc77eddcdf8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java @@ -5,15 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.type; +package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.io.IOException; import java.util.HashMap; @@ -31,11 +27,6 @@ * type conversion is done at the data node level. 
*/ public class MultiTypeEsField extends EsField { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "MultiTypeEsField", - MultiTypeEsField::new - ); private final Map<String, Expression> indexToConversionExpressions; @@ -44,21 +35,20 @@ public MultiTypeEsField(String name, DataType dataType, boolean aggregatable, Map<String, Expression> indexToConversionExpressions) { this.indexToConversionExpressions = indexToConversionExpressions; } - public MultiTypeEsField(StreamInput in) throws IOException { + protected MultiTypeEsField(StreamInput in) throws IOException { this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> i.readNamedWriteable(Expression.class))); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getDataType().typeName()); out.writeBoolean(isAggregatable()); out.writeMap(getIndexToConversionExpressions(), (o, v) -> out.writeNamedWriteable(v)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "MultiTypeEsField"; } public Map<String, Expression> getIndexToConversionExpressions() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java index c52230fa65829..0f2f136e74423 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Tuple; @@ -23,7 +22,6 @@ * Information about a field in an es index with the {@code text} type.
*/ public class TextEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "TextEsField", TextEsField::new); public TextEsField(String name, Map<String, EsField> properties, boolean hasDocValues) { this(name, properties, hasDocValues, false); @@ -33,21 +31,20 @@ public TextEsField(String name, Map<String, EsField> properties, boolean hasDocValues, boolean isAlias) { super(name, TEXT, properties, hasDocValues, isAlias); } - private TextEsField(StreamInput in) throws IOException { - this(in.readString(), in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean(), in.readBoolean()); + protected TextEsField(StreamInput in) throws IOException { + this(in.readString(), in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeBoolean(isAlias()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "TextEsField"; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java index 13e4d6ad953a8..13ee2b42a321b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,11 +19,6 @@ * All the subfields (properties) of an unsupported type are also be unsupported.
*/ public class UnsupportedEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "UnsupportedEsField", - UnsupportedEsField::new - ); private final String originalType; private final String inherited; // for fields belonging to parents (or grandparents) that have an unsupported type @@ -40,20 +34,19 @@ public UnsupportedEsField(String name, String originalType, String inherited, Map<String, EsField> properties) { } public UnsupportedEsField(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readOptionalString(), in.readMap(i -> i.readNamedWriteable(EsField.class))); + this(in.readString(), in.readString(), in.readOptionalString(), in.readImmutableMap(EsField::readFrom)); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getOriginalType()); out.writeOptionalString(getInherited()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "UnsupportedEsField"; } public String getOriginalType() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java index 01a153feeb473..471c9476ad31d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -44,4 +45,6 @@ public interface PlanStreamInput { * @throws IOException */ <A extends Attribute> A readAttributeWithCache(CheckedFunction<StreamInput, A, IOException> constructor) throws IOException; + + <A extends EsField> A readEsFieldWithCache() throws IOException; } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java index cec68c06e492e..4c30cb66e9f86 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.core.util; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -21,4 +22,13 @@ public interface PlanStreamOutput { * @throws IOException */ boolean writeAttributeCacheHeader(Attribute attribute) throws IOException; + + /** + * Writes a cache header for an {@link org.elasticsearch.xpack.esql.core.type.EsField} and caches it if it is not already in the cache. + * In that case, the field will have to serialize itself into this stream immediately after this method call. + * @param field The EsField to serialize + * @return true if the field needs to serialize itself, false otherwise (ie.
if already cached) + * @throws IOException + */ + boolean writeEsFieldCacheHeader(EsField field) throws IOException; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 5b59117ad356b..f88c603b4cacb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -43,6 +43,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -80,7 +81,6 @@ import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index cec4a5a3509a1..f77bfa6d3f862 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -40,6 +40,9 @@ import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -326,14 +329,16 @@ private static class LookupResponse extends TransportResponse { } LookupResponse(StreamInput in) throws IOException { - this.policies = in.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); - this.failures = in.readMap(StreamInput::readString, StreamInput::readString); + PlanStreamInput planIn = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), null); + this.policies = planIn.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); + this.failures = planIn.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, StreamOutput::writeWriteable); - out.writeMap(failures, StreamOutput::writeString); + PlanStreamOutput pso = new PlanStreamOutput(out, new PlanNameRegistry(), null); + pso.writeMap(policies, StreamOutput::writeWriteable); + pso.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java index 44443973764e6..63f22bd40ac39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java @@ -29,7 +29,7 @@ public ResolvedEnrichPolicy(StreamInput in) throws IOException { in.readString(), in.readStringCollectionAsList(), in.readMap(StreamInput::readString), - in.readMap(EsField::new) + in.readMap(EsField::readFrom) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index 78577aa2b91e0..5961d1c21bb02 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -75,7 +77,9 @@ private UnsupportedAttribute(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readString(), - new UnsupportedEsField(in), + in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) + ? EsField.readFrom(in) + : new UnsupportedEsField(in), in.readOptionalString(), NameId.readFrom((PlanStreamInput) in) ); @@ -86,7 +90,11 @@ public void writeTo(StreamOutput out) throws IOException { if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { Source.EMPTY.writeTo(out); out.writeString(name()); - field().writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + field().writeTo(out); + } else { + field().writeContent(out); + } out.writeOptionalString(hasCustomMessage ? 
message : null); id().writeTo(out); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index 92fa2f76ec8b2..d368c570a3f76 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -36,17 +36,13 @@ public EsIndex(String name, Map mapping, Set concreteIn @SuppressWarnings("unchecked") public EsIndex(StreamInput in) throws IOException { - this( - in.readString(), - in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class)), - (Set) in.readGenericValue() - ); + this(in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom), (Set) in.readGenericValue()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name()); - out.writeMap(mapping(), StreamOutput::writeNamedWriteable); + out.writeMap(mapping(), (o, x) -> x.writeTo(out)); out.writeGenericValue(concreteIndices()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index c8e744dfff054..ad66378da5d9e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -28,6 +29,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanNamedReader; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -65,6 +67,8 @@ public NameId apply(long streamNameId) { private Attribute[] attributesCache = new Attribute[64]; + private EsField[] esFieldsCache = new EsField[64]; + private final PlanNameRegistry registry; // hook for nameId, where can cache and map, for now just return a NameId of the same long value. @@ -239,7 +243,7 @@ private Attribute attributeFromCache(int id) throws IOException { } /** - * Add and attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * Add an attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} * @param id The ID that will reference the attribute. 
Generated at serialization time * @param attr The attribute to cache */ @@ -250,4 +254,47 @@ private void cacheAttribute(int id, Attribute attr) { } attributesCache[id] = attr; } + + @SuppressWarnings("unchecked") + public A readEsFieldWithCache() throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} + int cacheId = Math.toIntExact(readZLong()); + if (cacheId < 0) { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + cacheId = -1 - cacheId; + EsField result = reader.read(this); + cacheEsField(cacheId, result); + return (A) result; + } else { + return (A) esFieldFromCache(cacheId); + } + } else { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + return (A) reader.read(this); + } + } + + private EsField esFieldFromCache(int id) throws IOException { + if (esFieldsCache[id] == null) { + throw new IOException("Attribute ID not found in serialization cache [" + id + "]"); + } + return esFieldsCache[id]; + } + + /** + * Add an EsField to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * @param id The ID that will reference the field. Generated at serialization time + * @param field The EsField to cache + */ + private void cacheEsField(int id, EsField field) { + assert id >= 0; + if (id >= esFieldsCache.length) { + esFieldsCache = ArrayUtil.grow(esFieldsCache); + } + esFieldsCache[id] = field; + } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index f918621d87a24..d76c61eac05d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.Configuration; @@ -62,6 +63,11 @@ public final class PlanStreamOutput extends StreamOutput implements org.elastics */ protected final Map cachedAttributes = new IdentityHashMap<>(); + /** + * Cache for EsFields. 
+ */ + protected final Map cachedEsFields = new IdentityHashMap<>(); + private final StreamOutput delegate; private final PlanNameRegistry registry; @@ -205,6 +211,38 @@ private int cacheAttribute(Attribute attr) { return id; } + @Override + public boolean writeEsFieldCacheHeader(EsField field) throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + Integer cacheId = esFieldIdFromCache(field); + if (cacheId != null) { + writeZLong(cacheId); + return false; + } + + cacheId = cacheEsField(field); + writeZLong(-1 - cacheId); + } + writeString(field.getWriteableName()); + return true; + } + + private Integer esFieldIdFromCache(EsField field) { + return cachedEsFields.get(field); + } + + private int cacheEsField(EsField attr) { + if (cachedEsFields.containsKey(attr)) { + throw new IllegalArgumentException("EsField already present in the serialization cache [" + attr + "]"); + } + int id = cachedEsFields.size(); + if (id >= maxSerializedAttributes) { + throw new InvalidArgumentException("Limit of the number of serialized EsFields exceeded [{}]", maxSerializedAttributes); + } + cachedEsFields.put(attr, id); + return id; + } + /** * The byte representing a {@link Block} sent for the first time. The byte * will be followed by a {@link StreamOutput#writeVInt} encoded identifier diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 45989b4f563ce..8fddb7407a02a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -52,6 +52,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -60,7 +61,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.DriverParallelism; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index b55c5f604023f..f0686baf68f6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -61,7 +61,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import 
org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; @@ -70,7 +69,6 @@ import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.lang.invoke.MethodHandles; import java.util.ArrayList; @@ -193,14 +191,12 @@ public List getNamedWriteables() { entries.add(AsyncOperator.Status.ENTRY); entries.add(EnrichLookupOperator.Status.ENTRY); entries.addAll(Block.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project entries.addAll(Expression.getNamedWriteables()); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); // TODO combine with above once these are in the same project - entries.add(MultiTypeEsField.ENTRY); // TODO combine with EsField.getNamedWriteables() once these are in the same module entries.addAll(EsqlScalarFunction.getNamedWriteables()); entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(LogicalPlan.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index d8de034111865..339e7159ed87d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; @@ -119,7 +118,6 @@ public static NamedWriteableRegistry writableRegistry() { entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new)); entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new)); entries.add(SingleValueQuery.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); entries.addAll(NamedExpression.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 7a00f8ef154ce..596ff2af5fb5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import 
org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; @@ -37,7 +36,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.add(UnsupportedAttribute.ENTRY); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.add(org.elasticsearch.xpack.esql.expression.Order.ENTRY); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java index 36f8b43e69378..2a6791a1f5300 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.SourceTests; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; @@ -81,7 +80,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index c625ae5dfb61b..76b813f08d818 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -56,7 +55,6 @@ protected final ExtraAttribute mutateInstance(ExtraAttribute instance) { protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java index 03befe66ac28e..8090a20ddc836 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java @@ -11,9 +11,9 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.AbstractEsFieldTypeTests; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.type.AbstractEsFieldTypeTests; public class FieldAttributeTests extends AbstractAttributeTestCase { public static FieldAttribute createFieldAttribute(int maxDepth, boolean onlyRepresentable) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java index 4ab2959b37d29..8e5c098c429db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; -import org.elasticsearch.xpack.esql.core.type.UnsupportedEsFieldTests; +import org.elasticsearch.xpack.esql.type.UnsupportedEsFieldTests; public class UnsupportedAttributeTests extends AbstractAttributeTestCase { @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index e1b56d61a211c..504cf4ec1cd12 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.esql.index; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.EsFieldTests; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.type.EsFieldTests; import java.io.IOException; import java.util.HashMap; @@ -56,7 +58,12 @@ private static Set randomConcreteIndices() { @Override protected Writeable.Reader instanceReader() { - return EsIndex::new; + return a -> new EsIndex(new PlanStreamInput(a, new PlanNameRegistry(), a.namedWriteableRegistry(), null)); + } + 
+ @Override + protected Writeable.Writer instanceWriter() { + return (out, idx) -> new PlanStreamOutput(out, new PlanNameRegistry(), null).writeWriteable(idx); } @Override @@ -78,11 +85,6 @@ protected EsIndex mutateInstance(EsIndex instance) throws IOException { return new EsIndex(name, mapping, concreteIndices); } - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - /** * Build an {@link EsIndex} with many conflicting fields across many indices. */ @@ -136,7 +138,12 @@ public static EsIndex indexWithManyConflicts(boolean withParent) { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(976591)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(991027)); + /* + * History: + * 953.7kb - shorten error messages for UnsupportedAttributes #111973 + * 967.7kb - cache EsFields #112008 (little overhead of the cache) + */ } /** @@ -144,11 +151,12 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(1921374)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(1374498)); /* * History: * 16.9mb - start * 1.8mb - shorten error messages for UnsupportedAttributes #111973 + * 1.3mb - cache EsFields #112008 */ } @@ -170,8 +178,8 @@ public void testManyTypeConflictsWithParent() throws IOException { *

    */ private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { - try (BytesStreamOutput out = new BytesStreamOutput()) { - indexWithManyConflicts(withParent).writeTo(out); + try (BytesStreamOutput out = new BytesStreamOutput(); var pso = new PlanStreamOutput(out, new PlanNameRegistry(), null)) { + indexWithManyConflicts(withParent).writeTo(pso); assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index a5f2adbc1fc29..e5f195b053349 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -269,7 +269,7 @@ static Nullability randomNullability() { }; } - static EsField randomEsField() { + public static EsField randomEsField() { return randomEsField(0); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java index d169cdb5742af..cdb6c5384e16a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -259,6 +259,42 @@ public void testWriteDifferentAttributesSameID() throws IOException { } } + public void testWriteMultipleEsFields() throws IOException { + Configuration configuration = randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + List fields = new ArrayList<>(); + int occurrences = randomIntBetween(2, 300); + for (int i = 0; i < occurrences; i++) { + fields.add(PlanNamedTypesTests.randomEsField()); + } + + // send all the EsFields, three times + for (int i = 0; i < 3; i++) { + for (EsField attr : fields) { + attr.writeTo(planStream); + } + } + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + List readFields = new ArrayList<>(); + for (int i = 0; i < occurrences; i++) { + readFields.add(EsField.readFrom(in)); + assertThat(readFields.get(i), equalTo(fields.get(i))); + } + // two more times + for (int i = 0; i < 2; i++) { + for (int j = 0; j < occurrences; j++) { + EsField attr = EsField.readFrom(in); + assertThat(attr, sameInstance(readFields.get(j))); + } + } + } + } + } + private static Attribute randomAttribute() { return switch (randomInt(3)) { case 0 -> PlanNamedTypesTests.randomFieldAttribute(); @@ -293,7 +329,6 @@ private Column randomColumn() { writeables.addAll(Block.getNamedWriteables()); writeables.addAll(Attribute.getNamedWriteables()); writeables.add(UnsupportedAttribute.ENTRY); - writeables.addAll(EsField.getNamedWriteables()); REGISTRY = new NamedWriteableRegistry(new ArrayList<>(new HashSet<>(writeables))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java index 8562391b2e3b0..1b9df46a1c842 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.FieldAttributeTests; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -42,7 +41,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); return new NamedWriteableRegistry(entries); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java index b7b321a022b87..7a0d125ad85ba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -46,7 +45,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java index 237f8d6a9c580..ae58c49eade17 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.index.EsIndex; import 
org.elasticsearch.xpack.esql.index.EsIndexSerializationTests; @@ -63,7 +62,12 @@ public static Source randomSource() { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(2444252)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(1897374)); + /* + * History: + * 2.3mb - shorten error messages for UnsupportedAttributes #111973 + * 1.8mb - cache EsFields #112008 + */ } /** @@ -71,12 +75,13 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(5885765)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(3271487)); /* * History: * 2 gb+ - start * 43.3mb - Cache attribute subclasses #111447 * 5.6mb - shorten error messages for UnsupportedAttributes #111973 + * 3.1mb - cache EsFields #112008 */ } @@ -131,7 +136,6 @@ private NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java similarity index 57% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java index a415c529894c3..9b2bf03b5c8aa 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java @@ -5,16 +5,26 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.AbstractWireTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.TreeMap; -public abstract class AbstractEsFieldTypeTests extends AbstractNamedWriteableTestCase { +public abstract class AbstractEsFieldTypeTests extends AbstractWireTestCase { public static EsField randomAnyEsField(int maxDepth) { return switch (between(0, 5)) { case 0 -> EsFieldTests.randomEsField(maxDepth); @@ -32,6 +42,25 @@ public static EsField randomAnyEsField(int maxDepth) { protected abstract T mutate(T instance); + @Override + protected EsField copyInstance(EsField instance, TransportVersion version) throws IOException { + NamedWriteableRegistry namedWriteableRegistry = getNamedWriteableRegistry(); + try ( + BytesStreamOutput output = new BytesStreamOutput(); + var pso = new PlanStreamOutput(output, new PlanNameRegistry(), EsqlTestUtils.TEST_CFG) + ) { + pso.setTransportVersion(version); + instance.writeTo(pso); + try ( + StreamInput in1 = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry); + var psi = new PlanStreamInput(in1, new PlanNameRegistry(), in1.namedWriteableRegistry(), EsqlTestUtils.TEST_CFG) + ) { + psi.setTransportVersion(version); + return EsField.readFrom(psi); + } + } + } + /** * Generate sub-properties. * @param maxDepth the maximum number of levels of properties to make @@ -59,11 +88,6 @@ protected final T mutateInstance(EsField instance) throws IOException { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - - @Override - protected final Class categoryClass() { - return EsField.class; + return new NamedWriteableRegistry(List.of()); } } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java similarity index 99% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java index 929aa1c0eab49..9f8c8f91b7037 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java @@ -4,13 +4,16 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.Converter; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.DataTypeConverter; import org.elasticsearch.xpack.versionfield.Version; import java.math.BigDecimal; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java similarity index 89% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java index dea03ee8a8cdf..bf0494d5fd043 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DateEsField; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java index e72ae0c5c0cda..e824b4de03e26 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java index 47a99329d0222..c66088b0695d4 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java similarity index 92% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java index a5d3b8329b2df..ef04f0e27c096 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java @@ -5,9 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.KeywordEsField; import java.util.Map; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java index 618ca812005f8..d4ca40b75d2f3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -9,13 +9,14 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.test.AbstractWireTestCase; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; @@ -57,7 +58,7 @@ * These differences can be minimized once Expression is fully supported in the new serialization approach, and the esql and esql.core * modules are merged, or at least the relevant classes are moved. 
*/ -public class MultiTypeEsFieldTests extends AbstractNamedWriteableTestCase { +public class MultiTypeEsFieldTests extends AbstractWireTestCase { private Configuration config; @@ -94,26 +95,19 @@ protected MultiTypeEsField mutateInstance(MultiTypeEsField instance) throws IOEx protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); - entries.add(MultiTypeEsField.ENTRY); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } - @Override - protected final Class categoryClass() { - return MultiTypeEsField.class; - } - @Override protected final MultiTypeEsField copyInstance(MultiTypeEsField instance, TransportVersion version) throws IOException { return copyInstance( instance, getNamedWriteableRegistry(), - (out, v) -> new PlanStreamOutput(out, new PlanNameRegistry(), config).writeNamedWriteable(v), + (out, v) -> v.writeTo(new PlanStreamOutput(out, new PlanNameRegistry(), config)), in -> { PlanStreamInput pin = new PlanStreamInput(in, new PlanNameRegistry(), in.namedWriteableRegistry(), config); - return (MultiTypeEsField) pin.readNamedWriteable(EsField.class); + return EsField.readFrom(pin); }, version ); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java index 817dd7cd27094..9af3b7376f2b2 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.TextEsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java index e05d8ca10425e..a89ca9481b7e1 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
  */
 
-package org.elasticsearch.xpack.esql.core.type;
+package org.elasticsearch.xpack.esql.type;
+
+import org.elasticsearch.xpack.esql.core.type.EsField;
+import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField;
 
 import java.util.Map;
 
From 73c5c1e1c587cc7ec7ce1f0d10fea49ecfd39002 Mon Sep 17 00:00:00 2001
From: Chris Berkhout
Date: Tue, 27 Aug 2024 11:35:53 +0200
Subject: [PATCH 223/389] ByteArrayStreamInput: Return -1 when there are no more bytes to read (#112214)

---
 docs/changelog/112214.yaml                                  | 5 +++++
 .../common/io/stream/ByteArrayStreamInput.java              | 6 +++++-
 .../elasticsearch/common/io/stream/AbstractStreamTests.java | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 docs/changelog/112214.yaml

diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml
new file mode 100644
index 0000000000000..430f95a72bb3f
--- /dev/null
+++ b/docs/changelog/112214.yaml
@@ -0,0 +1,5 @@
+pr: 112214
+summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read'
+area: Infra/Core
+type: bug
+issues: []
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
index 838f2998d339f..a27eec4c12061 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
@@ -120,7 +120,11 @@ public void readBytes(byte[] b, int offset, int len) {
 
     @Override
     public int read(byte[] b, int off, int len) throws IOException {
-        int toRead = Math.min(len, available());
+        final int available = limit - pos;
+        if (available <= 0) {
+            return -1;
+        }
+        int toRead = Math.min(len, available);
         readBytes(b, off, toRead);
         return toRead;
     }
diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
index 8451d2fd64b9c..b1104a72400ea 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
@@ -723,6 +723,7 @@ public void testReadAfterReachingEndOfStream() throws IOException {
             input.readBytes(new byte[len], 0, len);
 
             assertEquals(-1, input.read());
+            assertEquals(-1, input.read(new byte[2], 0, 2));
         }
     }
 
From fb32adcb174a7f32338b55737c8273fd962fefdd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com>
Date: Tue, 27 Aug 2024 14:10:05 +0200
Subject: [PATCH 224/389] Add manage roles privilege (#110633)

This PR adds functionality to limit the resources and privileges an Elasticsearch user can grant permissions to when creating a role. This is achieved using a new [global](https://www.elastic.co/guide/en/elasticsearch/reference/current/defining-roles.html) (configurable/request aware) cluster privilege, named `role`, with a sub-key called `manage/indices` which is an array where each entry is a pair of [index patterns](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf) and [index privileges](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-indices).
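Schematically, the new global privilege section of a role descriptor has the following shape (the pattern and privilege values here are placeholders, not part of this PR; concrete, working requests follow in the Example section below):

```
"global": {
  "role": {
    "manage": {
      "indices": [
        { "names": [ "<index pattern>", ... ], "privileges": [ "<index privilege>", ... ] }
      ]
    }
  }
}
```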
## Definition

- Using a role with this privilege to create, update or delete roles with privileges on indices outside of the indices matched by the [index pattern](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf) in the indices array will fail.
- Using a role with this privilege to try to create, update or delete roles with cluster, run_as, etc. privileges will fail.
- Using a role with this privilege with restricted indices will fail.
- Other broader privileges (such as manage_security) will nullify this privilege.

## Example

Create the `test-manage` role:

```
POST _security/role/test-manage
{
  "global": {
    "role": {
      "manage": {
        "indices": [
          { "names": ["allowed-index-prefix-*"], "privileges": ["read"] }
        ]
      }
    }
  }
}
```

A user with that role can then create a role:

```
POST _security/role/a-test-role
{
  "indices": [
    { "names": [ "allowed-index-prefix-some-index" ], "privileges": [ "read" ] }
  ]
}
```

But this would fail for:

```
POST _security/role/a-test-role
{
  "indices": [
    { "names": [ "not-allowed-index-prefix-some-index" ], "privileges": [ "read" ] }
  ]
}
```

## Backwards compatibility and mixed cluster concerns

- A new mapping version has been added to the security index to store the new privilege.
- If the new mapping version is not applied and a role descriptor with the new global privilege is written, the write will fail, causing an exception.
- When sending role descriptors over the transport layer in a mixed cluster, the new global privilege needs to be excluded for older versions. This is handled with a new transport version.
- If a role descriptor is serialized for API keys on one node in a mixed cluster and read from another, an older node might not be able to deserialize it, so it needs to be removed before being written in a mixed cluster with old nodes. This is handled in the API key service.
- If a role descriptor containing a global privilege is in a put role request in a mixed cluster where it's not supported on all nodes, the request to create the role fails.
- RCS is not applicable here since RCS only considers cluster privileges and index privileges (not global cluster privileges).
- This doesn't include remote privileges, since the current use case with connectors doesn't need roles to be created on a cluster separate from the cluster where the search data resides.

## Follow up work

- Create a docs PR.
- Error handling for actions that use manage roles: should configurable cluster privileges that grant restricted usage of actions be listed in authorization error messages?
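The combining of index groups used by the privilege check (see `indexGroupAutomatons` in `IndicesPermission` below) can be illustrated with a minimal, self-contained sketch. Plain JDK sets stand in for the Lucene automatons the real code uses; the `Group` record, the `CombineIndexGroupsSketch` class and both checks are illustrative stand-ins, not Elasticsearch classes:

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of why index groups must be combined before checking a pattern
// (e.g. the regular expression /[ab]/) that spans several groups.
public class CombineIndexGroupsSketch {
    record Group(Set<String> indices, Set<String> privileges) {}

    public static void main(String[] args) {
        List<Group> groups = List.of(
            new Group(Set.of("a"), Set.of("read", "create")),
            new Group(Set.of("b"), Set.of("read", "delete"))
        );
        Set<String> requestedIndices = Set.of("a", "b");
        Set<String> requestedPrivileges = Set.of("read");

        // Per-group check: no single group covers both indices, so this is false.
        boolean perGroup = groups.stream()
            .anyMatch(g -> g.indices().containsAll(requestedIndices)
                && g.privileges().containsAll(requestedPrivileges));
        System.out.println("per-group check grants: " + perGroup); // false

        // Combined check: union the index sets of all groups granting the
        // requested privileges, then test coverage once. This is true.
        Set<String> combined = new HashSet<>();
        for (Group g : groups) {
            if (g.privileges().containsAll(requestedPrivileges)) {
                combined.addAll(g.indices());
            }
        }
        System.out.println("combined check grants: " + combined.containsAll(requestedIndices)); // true
    }
}
```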
--- docs/changelog/110633.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/core/XPackClientPlugin.java | 7 +- .../authz/permission/ClusterPermission.java | 22 ++ .../authz/permission/IndicesPermission.java | 87 ++++- .../core/security/authz/permission/Role.java | 2 +- .../ConfigurableClusterPrivilege.java | 3 +- .../ConfigurableClusterPrivileges.java | 319 +++++++++++++++- .../authz/RoleDescriptorTestHelper.java | 35 +- .../RoleDescriptorsIntersectionTests.java | 5 + .../ConfigurableClusterPrivilegesTests.java | 8 +- .../privilege/ManageRolesPrivilegesTests.java | 351 ++++++++++++++++++ .../security/ManageRolesPrivilegeIT.java | 211 +++++++++++ .../xpack/security/apikey/ApiKeyRestIT.java | 67 ++++ .../xpack/security/authc/ApiKeyService.java | 125 ++++--- .../authz/store/NativeRolesStore.java | 11 +- .../support/SecuritySystemIndices.java | 40 ++ .../audit/logfile/LoggingAuditTrailTests.java | 10 +- .../security/audit/logfile/audited_roles.txt | 4 +- .../RolesBackwardsCompatibilityIT.java | 186 ++++++++-- 20 files changed, 1397 insertions(+), 102 deletions(-) create mode 100644 docs/changelog/110633.yaml create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java create mode 100644 x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml new file mode 100644 index 0000000000000..d4d1dc68cdbcc --- /dev/null +++ b/docs/changelog/110633.yaml @@ -0,0 +1,5 @@ +pr: 110633 +summary: Add manage roles privilege +area: Authorization +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 33f483c57b54e..582c618216999 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -198,6 +198,7 @@ static TransportVersion def(int id) { public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); + public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a2c3e40c76ae4..2e806a24ad469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -149,7 +149,7 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), - // security : conditional privileges + // security : configurable cluster privileges new NamedWriteableRegistry.Entry( ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, @@ -160,6 +160,11 @@ public List getNamedWriteables() { ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom + ), // security : role-mappings new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index c70f2a05bfe93..9c41786f39eeb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -17,6 +18,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Function; import java.util.function.Predicate; /** @@ -84,6 +86,16 @@ public static class Builder { private final List actionAutomatons = new ArrayList<>(); private final List permissionChecks = new ArrayList<>(); + private final RestrictedIndices restrictedIndices; + + public Builder(RestrictedIndices restrictedIndices) { + this.restrictedIndices = restrictedIndices; + } + + public Builder() { + this.restrictedIndices = null; + } + public Builder add( final ClusterPrivilege clusterPrivilege, final Set allowedActionPatterns, @@ -110,6 +122,16 @@ public Builder add(final ClusterPrivilege clusterPrivilege, final PermissionChec return this; } + public Builder addWithPredicateSupplier( + final ClusterPrivilege clusterPrivilege, + final Set 
allowedActionPatterns, + final Function> requestPredicateSupplier + ) { + final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of()); + Predicate requestPredicate = requestPredicateSupplier.apply(restrictedIndices); + return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate)); + } + public ClusterPermission build() { if (clusterPrivileges.isEmpty()) { return NONE; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index d29b1dd67757a..e1b72cc43b38e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -86,6 +87,7 @@ public Builder addGroup( public IndicesPermission build() { return new IndicesPermission(restrictedIndices, groups.toArray(Group.EMPTY_ARRAY)); } + } private IndicesPermission(RestrictedIndices restrictedIndices, Group[] groups) { @@ -238,6 +240,21 @@ public boolean check(String action) { return false; } + public boolean checkResourcePrivileges( + Set checkForIndexPatterns, + boolean allowRestrictedIndices, + Set checkForPrivileges, + @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder + ) { + return checkResourcePrivileges( + checkForIndexPatterns, + allowRestrictedIndices, + checkForPrivileges, + false, + resourcePrivilegesMapBuilder + ); + } + /** * For given index patterns and index privileges determines allowed privileges and creates an instance of {@link ResourcePrivilegesMap} * holding a map of resource to {@link ResourcePrivileges} where resource is index pattern and the map of index privilege to whether it @@ -246,6 +263,7 @@ public boolean check(String action) { * @param checkForIndexPatterns check permission grants for the set of index patterns * @param allowRestrictedIndices if {@code true} then checks permission grants even for restricted indices by index matching * @param checkForPrivileges check permission grants for the set of index privileges + * @param combineIndexGroups combine index groups to enable checking against regular expressions * @param resourcePrivilegesMapBuilder out-parameter for returning the details on which privilege over which resource is granted or not. * Can be {@code null} when no such details are needed so the method can return early, after * encountering the first privilege that is not granted over some resource. 
@@ -255,10 +273,13 @@ public boolean checkResourcePrivileges( Set checkForIndexPatterns, boolean allowRestrictedIndices, Set checkForPrivileges, + boolean combineIndexGroups, @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder ) { - final Map predicateCache = new HashMap<>(); boolean allMatch = true; + Map indexGroupAutomatons = indexGroupAutomatons( + combineIndexGroups && checkForIndexPatterns.stream().anyMatch(Automatons::isLuceneRegex) + ); for (String forIndexPattern : checkForIndexPatterns) { Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) { @@ -266,15 +287,14 @@ public boolean checkResourcePrivileges( } if (false == Operations.isEmpty(checkIndexAutomaton)) { Automaton allowedIndexPrivilegesAutomaton = null; - for (Group group : groups) { - final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, Group::getIndexMatcherAutomaton); - if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) { + for (var indexAndPrivilegeAutomaton : indexGroupAutomatons.entrySet()) { + if (Operations.subsetOf(checkIndexAutomaton, indexAndPrivilegeAutomaton.getValue())) { if (allowedIndexPrivilegesAutomaton != null) { allowedIndexPrivilegesAutomaton = Automatons.unionAndMinimize( - Arrays.asList(allowedIndexPrivilegesAutomaton, group.privilege().getAutomaton()) + Arrays.asList(allowedIndexPrivilegesAutomaton, indexAndPrivilegeAutomaton.getKey()) ); } else { - allowedIndexPrivilegesAutomaton = group.privilege().getAutomaton(); + allowedIndexPrivilegesAutomaton = indexAndPrivilegeAutomaton.getKey(); } } } @@ -656,6 +676,61 @@ private static boolean containsPrivilegeThatGrantsMappingUpdatesForBwc(Group gro return group.privilege().name().stream().anyMatch(PRIVILEGE_NAME_SET_BWC_ALLOW_MAPPING_UPDATE::contains); } + /** + * Get all automatons for the index groups in this permission and optionally combine the index groups to enable checking if a set of + * index patterns specified using a regular expression grants a set of index privileges. + * + *

    An index group is defined as a set of index patterns and a set of privileges (excluding field permissions and DLS queries). + * {@link IndicesPermission} consists of a set of index groups. For non-regular expression privilege checks, an index pattern is checked + * against each index group, to see if it's a sub-pattern of the index pattern for the group and then if that group grants some or all + * of the privileges requested. For regular expressions it's not sufficient to check per group since the index patterns covered by a + * group can be distinct sets and a regular expression can cover several distinct sets. + * + *

    For example, the two index groups: {"names": ["a"], "privileges": ["read", "create"]} and {"names": ["b"], + * "privileges": ["read","delete"]} will not match on ["/[ab]/"], while a single index group: + * {"names": ["a", "b"], "privileges": ["read"]} will. This happens because the index groups are evaluated against a request index + * pattern without first being combined. In the example above, the two index patterns should be combined to: + * {"names": ["a", "b"], "privileges": ["read"]} before being checked. + * + * + * @param combine combine index groups to allow for checking against regular expressions + * + * @return a map of all index and privilege pattern automatons + */ + private Map<Automaton, Automaton> indexGroupAutomatons(boolean combine) { + // Map of privilege automaton object references (cached by IndexPrivilege::CACHE) + Map<Automaton, Automaton> allAutomatons = new HashMap<>(); + for (Group group : groups) { + Automaton indexAutomaton = group.getIndexMatcherAutomaton(); + allAutomatons.compute( + group.privilege().getAutomaton(), + (key, value) -> value == null ? indexAutomaton : Automatons.unionAndMinimize(List.of(value, indexAutomaton)) + ); + if (combine) { + List<Tuple<Automaton, Automaton>> combinedAutomatons = new ArrayList<>(); + for (var indexAndPrivilegeAutomatons : allAutomatons.entrySet()) { + Automaton intersectingPrivileges = Operations.intersection( + indexAndPrivilegeAutomatons.getKey(), + group.privilege().getAutomaton() + ); + if (Operations.isEmpty(intersectingPrivileges) == false) { + Automaton indexPatternAutomaton = Automatons.unionAndMinimize( + List.of(indexAndPrivilegeAutomatons.getValue(), indexAutomaton) + ); + combinedAutomatons.add(new Tuple<>(intersectingPrivileges, indexPatternAutomaton)); + } + } + combinedAutomatons.forEach( + automatons -> allAutomatons.compute( + automatons.v1(), + (key, value) -> value == null ? 
automatons.v2() : Automatons.unionAndMinimize(List.of(value, automatons.v2())) + ) + ); + } + } + return allAutomatons; + } + public static class Group { public static final Group[] EMPTY_ARRAY = new Group[0]; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 0fc04e8cc9a52..d8d56a4fbb247 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -233,7 +233,7 @@ private Builder(RestrictedIndices restrictedIndices, String[] names) { } public Builder cluster(Set privilegeNames, Iterable configurableClusterPrivileges) { - ClusterPermission.Builder builder = ClusterPermission.builder(); + ClusterPermission.Builder builder = new ClusterPermission.Builder(restrictedIndices); if (privilegeNames.isEmpty() == false) { for (String name : privilegeNames) { builder = ClusterPrivilegeResolver.resolve(name).buildPermission(builder); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java index f9722ca42f20d..edb0cb8f9e79d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java @@ -41,7 +41,8 @@ public interface ConfigurableClusterPrivilege extends NamedWriteable, ToXContent */ enum Category { APPLICATION(new ParseField("application")), - PROFILE(new ParseField("profile")); + PROFILE(new ParseField("profile")), + ROLE(new ParseField("role")); public final ParseField field; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java index fed8b7e0d7a1c..b93aa079a28d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.core.security.authz.privilege; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,10 +20,21 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.privilege.ApplicationPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import 
org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege.Category; import org.elasticsearch.xpack.core.security.support.StringMatcher; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; @@ -30,12 +44,18 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; import java.util.function.Predicate; +import static org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege.DELETE_INDEX; + /** * Static utility class for working with {@link ConfigurableClusterPrivilege} instances */ @@ -43,6 +63,7 @@ public final class ConfigurableClusterPrivileges { public static final ConfigurableClusterPrivilege[] EMPTY_ARRAY = new ConfigurableClusterPrivilege[0]; + private static final Logger logger = LogManager.getLogger(ConfigurableClusterPrivileges.class); public static final Writeable.Reader READER = in1 -> in1.readNamedWriteable( ConfigurableClusterPrivilege.class ); @@ -61,7 +82,16 @@ public static ConfigurableClusterPrivilege[] readArray(StreamInput in) throws IO * Utility method to write an array of {@link ConfigurableClusterPrivilege} objects to a {@link StreamOutput} */ public static void writeArray(StreamOutput out, ConfigurableClusterPrivilege[] privileges) throws IOException { - out.writeArray(WRITER, privileges); + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + out.writeArray(WRITER, privileges); + } else { + out.writeArray( + WRITER, + Arrays.stream(privileges) + .filter(privilege -> privilege instanceof ManageRolesPrivilege == false) + .toArray(ConfigurableClusterPrivilege[]::new) + ); + } } /** @@ -97,7 +127,7 @@ public static List parse(XContentParser parser) th while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); - expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field); + expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field, Category.ROLE.field); if (Category.APPLICATION.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { @@ -106,8 +136,7 @@ public static List parse(XContentParser parser) th expectFieldName(parser, ManageApplicationPrivileges.Fields.MANAGE); privileges.add(ManageApplicationPrivileges.parse(parser)); } - } else { - assert Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler()); 
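For orientation while reading this parser change: the new "role" category nests as role, then manage, then indices, matching ManageRolesPrivilege.parse further down in this file. A hedged reconstruction of the accepted shape (the enclosing "global" object is an assumption, carried over from how the other configurable cluster privileges appear in role descriptors), together with the equivalent object form:

    import java.util.List;
    import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageRolesPrivilege;

    // Assumed placement inside a role descriptor, under "global":
    //
    //   "global": {
    //     "role": {
    //       "manage": {
    //         "indices": [
    //           { "names": [ "allowed-*" ], "privileges": [ "read" ] }
    //         ]
    //       }
    //     }
    //   }
    //
    // The equivalent object, using only types introduced by this patch:
    ManageRolesPrivilege privilege = new ManageRolesPrivilege(
        List.of(
            new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(
                new String[] { "allowed-*" }, // index patterns the role holder may grant
                new String[] { "read" }       // privileges the role holder may grant over them
            )
        )
    );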
+ } else if (Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); @@ -115,9 +144,16 @@ public static List parse(XContentParser parser) th expectFieldName(parser, WriteProfileDataPrivileges.Fields.WRITE); privileges.add(WriteProfileDataPrivileges.parse(parser)); } + } else if (Category.ROLE.field.match(parser.currentName(), parser.getDeprecationHandler())) { + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + + expectFieldName(parser, ManageRolesPrivilege.Fields.MANAGE); + privileges.add(ManageRolesPrivilege.parse(parser)); + } } } - return privileges; } @@ -362,4 +398,277 @@ private interface Fields { ParseField APPLICATIONS = new ParseField("applications"); } } + + public static class ManageRolesPrivilege implements ConfigurableClusterPrivilege { + public static final String WRITEABLE_NAME = "manage-roles-privilege"; + private final List indexPermissionGroups; + private final Function> requestPredicateSupplier; + + private static final Set EXPECTED_INDEX_GROUP_FIELDS = Set.of( + Fields.NAMES.getPreferredName(), + Fields.PRIVILEGES.getPreferredName() + ); + + public ManageRolesPrivilege(List manageRolesIndexPermissionGroups) { + this.indexPermissionGroups = manageRolesIndexPermissionGroups; + this.requestPredicateSupplier = (restrictedIndices) -> { + IndicesPermission.Builder indicesPermissionBuilder = new IndicesPermission.Builder(restrictedIndices); + for (ManageRolesIndexPermissionGroup indexPatternPrivilege : manageRolesIndexPermissionGroups) { + indicesPermissionBuilder.addGroup( + IndexPrivilege.get(Set.of(indexPatternPrivilege.privileges())), + FieldPermissions.DEFAULT, + null, + false, + indexPatternPrivilege.indexPatterns() + ); + } + final IndicesPermission indicesPermission = indicesPermissionBuilder.build(); + + return (TransportRequest request) -> { + if (request instanceof final PutRoleRequest putRoleRequest) { + return hasNonIndexPrivileges(putRoleRequest.roleDescriptor()) == false + && Arrays.stream(putRoleRequest.indices()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ); + } else if (request instanceof final BulkPutRolesRequest bulkPutRoleRequest) { + return bulkPutRoleRequest.getRoles().stream().noneMatch(ManageRolesPrivilege::hasNonIndexPrivileges) + && bulkPutRoleRequest.getRoles() + .stream() + .allMatch( + roleDescriptor -> Arrays.stream(roleDescriptor.getIndicesPrivileges()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ) + ); + } else if (request instanceof final DeleteRoleRequest deleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + new String[] { deleteRoleRequest.name() }, + DELETE_INDEX.name().toArray(String[]::new) + ); + } else if (request instanceof final BulkDeleteRolesRequest bulkDeleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + bulkDeleteRoleRequest.getRoleNames().toArray(String[]::new), + DELETE_INDEX.name().toArray(String[]::new) + ); + } + 
throw new IllegalArgumentException("Unsupported request type [" + request.getClass() + "]"); + }; + }; + } + + @Override + public Category getCategory() { + return Category.ROLE; + } + + @Override + public String getWriteableName() { + return WRITEABLE_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(indexPermissionGroups); + } + + public static ManageRolesPrivilege createFrom(StreamInput in) throws IOException { + final List indexPatternPrivileges = in.readCollectionAsList( + ManageRolesIndexPermissionGroup::createFrom + ); + return new ManageRolesPrivilege(indexPatternPrivileges); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field( + Fields.MANAGE.getPreferredName(), + Map.of(Fields.INDICES.getPreferredName(), indexPermissionGroups.stream().map(indexPatternPrivilege -> { + Map sortedMap = new TreeMap<>(); + sortedMap.put(Fields.NAMES.getPreferredName(), indexPatternPrivilege.indexPatterns()); + sortedMap.put(Fields.PRIVILEGES.getPreferredName(), indexPatternPrivilege.privileges()); + return sortedMap; + }).toList()) + ); + } + + private static void expectedIndexGroupFields(String fieldName, XContentParser parser) { + if (EXPECTED_INDEX_GROUP_FIELDS.contains(fieldName) == false) { + throw new XContentParseException( + parser.getTokenLocation(), + "failed to parse privilege. expected one of " + + Arrays.toString(EXPECTED_INDEX_GROUP_FIELDS.toArray(String[]::new)) + + " but found [" + + fieldName + + "] instead" + ); + } + } + + public static ManageRolesPrivilege parse(XContentParser parser) throws IOException { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.MANAGE); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.INDICES); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + List indexPrivileges = new ArrayList<>(); + Map parsedArraysByFieldName = new HashMap<>(); + + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + expectedToken(token, parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + String currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + indexPrivileges.add( + new ManageRolesIndexPermissionGroup( + parsedArraysByFieldName.get(Fields.NAMES.getPreferredName()), + parsedArraysByFieldName.get(Fields.PRIVILEGES.getPreferredName()) + ) + ); + } + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + + for (var indexPrivilege : indexPrivileges) { + if (indexPrivilege.indexPatterns == null || indexPrivilege.indexPatterns.length == 
0) { + throw new IllegalArgumentException("Indices privileges must refer to at least one index name or index name pattern"); + } + if (indexPrivilege.privileges == null || indexPrivilege.privileges.length == 0) { + throw new IllegalArgumentException("Indices privileges must define at least one privilege"); + } + } + return new ManageRolesPrivilege(indexPrivileges); + } + + public record ManageRolesIndexPermissionGroup(String[] indexPatterns, String[] privileges) implements Writeable { + public static ManageRolesIndexPermissionGroup createFrom(StreamInput in) throws IOException { + return new ManageRolesIndexPermissionGroup(in.readStringArray(), in.readStringArray()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indexPatterns); + out.writeStringArray(privileges); + } + + @Override + public String toString() { + return "{" + + Fields.NAMES + + ":" + + Arrays.toString(indexPatterns()) + + ":" + + Fields.PRIVILEGES + + ":" + + Arrays.toString(privileges()) + + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ManageRolesIndexPermissionGroup that = (ManageRolesIndexPermissionGroup) o; + return Arrays.equals(indexPatterns, that.indexPatterns) && Arrays.equals(privileges, that.privileges); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indexPatterns), Arrays.hashCode(privileges)); + } + } + + @Override + public String toString() { + return "{" + + getCategory() + + ":" + + Fields.MANAGE.getPreferredName() + + ":" + + Fields.INDICES.getPreferredName() + + "=[" + + Strings.collectionToDelimitedString(indexPermissionGroups, ",") + + "]}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ManageRolesPrivilege that = (ManageRolesPrivilege) o; + + if (this.indexPermissionGroups.size() != that.indexPermissionGroups.size()) { + return false; + } + + for (int i = 0; i < this.indexPermissionGroups.size(); i++) { + if (Objects.equals(this.indexPermissionGroups.get(i), that.indexPermissionGroups.get(i)) == false) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return Objects.hash(indexPermissionGroups.hashCode()); + } + + @Override + public ClusterPermission.Builder buildPermission(final ClusterPermission.Builder builder) { + return builder.addWithPredicateSupplier( + this, + Set.of(PutRoleAction.NAME, ActionTypes.BULK_PUT_ROLES.name(), ActionTypes.BULK_DELETE_ROLES.name(), DeleteRoleAction.NAME), + requestPredicateSupplier + ); + } + + private static boolean requestIndexPatternsAllowed( + IndicesPermission indicesPermission, + String[] requestIndexPatterns, + String[] privileges + ) { + return indicesPermission.checkResourcePrivileges(Set.of(requestIndexPatterns), false, Set.of(privileges), true, null); + } + + private static boolean hasNonIndexPrivileges(RoleDescriptor roleDescriptor) { + return roleDescriptor.hasApplicationPrivileges() + || roleDescriptor.hasClusterPrivileges() + || roleDescriptor.hasConfigurableClusterPrivileges() + || roleDescriptor.hasRemoteIndicesPrivileges() + || roleDescriptor.hasRemoteClusterPermissions() + || roleDescriptor.hasRunAs() + || roleDescriptor.hasWorkflowsRestriction(); + } + + private interface Fields { + ParseField MANAGE = new ParseField("manage"); + ParseField INDICES = new ParseField("indices"); + ParseField PRIVILEGES 
= new ParseField("privileges"); + ParseField NAMES = new ParseField("names"); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java index 2d8b62335f4ef..77a37cec45b25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java @@ -26,6 +26,7 @@ import static org.elasticsearch.test.ESTestCase.generateRandomStringArray; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomArray; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; @@ -52,6 +53,7 @@ public static RoleDescriptor randomRoleDescriptor() { .allowRestriction(randomBoolean()) .allowDescription(randomBoolean()) .allowRemoteClusters(randomBoolean()) + .allowConfigurableClusterPrivileges(randomBoolean()) .build(); } @@ -69,7 +71,7 @@ public static Map randomRoleDescriptorMetadata(boolean allowRese } public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { - final ConfigurableClusterPrivilege[] configurableClusterPrivileges = switch (randomIntBetween(0, 4)) { + return switch (randomIntBetween(0, 5)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageApplicationPrivileges( @@ -93,9 +95,9 @@ public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { new ConfigurableClusterPrivileges.WriteProfileDataPrivileges( Sets.newHashSet(generateRandomStringArray(3, randomIntBetween(4, 12), false, false)) ) }; + case 5 -> randomManageRolesPrivileges(); default -> throw new IllegalStateException("Unexpected value"); }; - return configurableClusterPrivileges; } public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPrivileges() { @@ -119,6 +121,27 @@ public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPr return applicationPrivileges; } + public static ConfigurableClusterPrivilege[] randomManageRolesPrivileges() { + List indexPatternPrivileges = randomList( + 1, + 10, + () -> { + String[] indexPatterns = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(5, 100)); + + int startIndex = randomIntBetween(0, IndexPrivilege.names().size() - 2); + int endIndex = randomIntBetween(startIndex + 1, IndexPrivilege.names().size()); + + String[] indexPrivileges = IndexPrivilege.names().stream().toList().subList(startIndex, endIndex).toArray(String[]::new); + return new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns, + indexPrivileges + ); + } + ); + + return new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageRolesPrivilege(indexPatternPrivileges) }; + } + public static RoleDescriptor.RemoteIndicesPrivileges[] randomRemoteIndicesPrivileges(int min, int max) { return randomRemoteIndicesPrivileges(min, max, Set.of()); } @@ -251,6 +274,7 @@ public static class Builder { private boolean allowRestriction = false; private boolean allowDescription = false; private boolean allowRemoteClusters = false; + private boolean allowConfigurableClusterPrivileges = 
false; public Builder() {} @@ -259,6 +283,11 @@ public Builder allowReservedMetadata(boolean allowReservedMetadata) { return this; } + public Builder allowConfigurableClusterPrivileges(boolean allowConfigurableClusterPrivileges) { + this.allowConfigurableClusterPrivileges = allowConfigurableClusterPrivileges; + return this; + } + public Builder alwaysIncludeRemoteIndices() { this.alwaysIncludeRemoteIndices = true; return this; @@ -302,7 +331,7 @@ public RoleDescriptor build() { randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), randomIndicesPrivileges(0, 3), randomApplicationPrivileges(), - randomClusterPrivileges(), + allowConfigurableClusterPrivileges ? randomClusterPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(allowReservedMetadata), Map.of(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java index a892e8b864e6e..b67292e76961f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java @@ -48,6 +48,11 @@ public void testSerialization() throws IOException { ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom ) ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java index c6fac77ea26e6..5599b33fbcfe7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java @@ -61,13 +61,15 @@ public void testGenerateAndParseXContent() throws Exception { } private ConfigurableClusterPrivilege[] buildSecurityPrivileges() { - return switch (randomIntBetween(0, 3)) { + return switch (randomIntBetween(0, 4)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges() }; case 2 -> new ConfigurableClusterPrivilege[] { WriteProfileDataPrivilegesTests.buildPrivileges() }; - case 3 -> new ConfigurableClusterPrivilege[] { + case 3 -> new ConfigurableClusterPrivilege[] { ManageRolesPrivilegesTests.buildPrivileges() }; + case 4 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges(), - WriteProfileDataPrivilegesTests.buildPrivileges() }; + WriteProfileDataPrivilegesTests.buildPrivileges(), + ManageRolesPrivilegesTests.buildPrivileges() }; default -> throw new IllegalStateException("Unexpected value"); }; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java new file mode 100644 index 0000000000000..2d47752063d9d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java @@ -0,0 +1,351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageRolesPrivilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; + +public class ManageRolesPrivilegesTests extends AbstractNamedWriteableTestCase { + + private static final int MIN_INDEX_NAME_LENGTH = 4; + + public void testSimplePutRoleRequest() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedIndexPatterns( + permission, + new String[] { "allowed-" + 
randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + + public void testDeleteRoleRequest() { + new ReservedRolesStore(); + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "manage" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedDeleteIndex( + permission, + new String[] { "allowed-" + randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "read" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), false); + } + } + + public void testSeveralIndexGroupsPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "d" }, new String[] { "read" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[cd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[acd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ef]/" }, new String[] { "read" }, false); + } + + public void testPrivilegeIntersectionPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "all" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "create" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "d" }, new String[] { "delete" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "e" }, new String[] { "create_doc" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "f" }, new String[] { "read", "manage" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new 
String[] { "/[ab]/" }, new String[] { "all" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abc]/" }, new String[] { "all" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read", "manage" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create", "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abcde]/" }, new String[] { "create_doc" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[eb]/" }, new String[] { "create_doc" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[d]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ad]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[de]/" }, new String[] { "delete" }, false); + + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "write" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + } + + public void testEmptyPrivileges() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege(List.of()); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "test" }, new String[] { "all" }, false); + } + + public void testRestrictedIndexPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "security" }, true); + assertAllowedIndexPatterns(permission, new String[] { ".security" }, false); + assertAllowedIndexPatterns(permission, new String[] { "security", ".security-7" }, false); + } + + public void testGenerateAndParseXContent() throws Exception { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + final XContentBuilder builder = new XContentBuilder(xContent, out); + + final ManageRolesPrivilege original = buildPrivileges(); + builder.startObject(); + original.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.flush(); + + final byte[] bytes = out.toByteArray(); + try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, bytes)) { + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), 
equalTo(XContentParser.Token.FIELD_NAME)); + final ManageRolesPrivilege clone = ManageRolesPrivilege.parse(parser); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT)); + + assertThat(clone, equalTo(original)); + assertThat(original, equalTo(clone)); + } + } + } + + public void testPutRoleRequestContainsNonIndexPrivileges() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + + switch (randomIntBetween(0, 5)) { + case 0: + putRoleRequest.cluster("all"); + break; + case 1: + putRoleRequest.runAs("test"); + break; + case 2: + putRoleRequest.addApplicationPrivileges( + RoleDescriptor.ApplicationResourcePrivileges.builder() + .privileges("all") + .application("test-app") + .resources("test-resource") + .build() + ); + break; + case 3: + putRoleRequest.addRemoteIndex( + new RoleDescriptor.RemoteIndicesPrivileges.Builder("test-cluster").privileges("all").indices("test*").build() + ); + break; + case 4: + putRoleRequest.putRemoteCluster( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "test" }) + ) + ); + break; + case 5: + putRoleRequest.conditionalCluster( + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "test-*" }, new String[] { "read" }) + ) + ) + ); + break; + } + + putRoleRequest.name(randomAlphaOfLength(4)); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(false)); + } + + private static boolean permissionCheck(ClusterPermission permission, String action, ActionRequest request) { + final Authentication authentication = AuthenticationTestHelper.builder().build(); + assertThat(request.validate(), nullValue()); + return permission.check(action, request, authentication); + } + + private static void assertAllowedIndexPatterns(ClusterPermission permission, String[] indexPatterns, boolean expected) { + assertAllowedIndexPatterns(permission, indexPatterns, new String[] { "index", "write", "indices:data/read" }, expected); + } + + private static void assertAllowedIndexPatterns( + ClusterPermission permission, + String[] indexPatterns, + String[] privileges, + boolean expected + ) { + { + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + putRoleRequest.name(randomAlphaOfLength(3)); + putRoleRequest.addIndex(indexPatterns, privileges, null, null, null, false); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(expected)); + } + { + final BulkPutRolesRequest bulkPutRolesRequest = new BulkPutRolesRequest( + List.of( + new RoleDescriptor( + randomAlphaOfLength(3), + new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(indexPatterns).privileges(privileges).build() }, + new String[] {} + ) + ) + ); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_put", bulkPutRolesRequest), is(expected)); + } + } + + private static void assertAllowedDeleteIndex(ClusterPermission permission, String[] 
indices, boolean expected) { + { + final BulkDeleteRolesRequest bulkDeleteRolesRequest = new BulkDeleteRolesRequest(List.of(indices)); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_delete", bulkDeleteRolesRequest), is(expected)); + } + { + assertThat(Arrays.stream(indices).allMatch(pattern -> { + final DeleteRoleRequest deleteRolesRequest = new DeleteRoleRequest(); + deleteRolesRequest.name(pattern); + return permissionCheck(permission, "cluster:admin/xpack/security/role/delete", deleteRolesRequest); + }), is(expected)); + } + } + + public static ManageRolesPrivilege buildPrivileges() { + return buildPrivileges(randomIntBetween(MIN_INDEX_NAME_LENGTH, 7)); + } + + private static ManageRolesPrivilege buildPrivileges(int indexNameLength) { + String[] indexNames = Objects.requireNonNull(generateRandomStringArray(5, indexNameLength, false, false)); + + return new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(indexNames, IndexPrivilege.READ.name().toArray(String[]::new))) + ); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + try (var xClientPlugin = new XPackClientPlugin()) { + return new NamedWriteableRegistry(xClientPlugin.getNamedWriteables()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected Class categoryClass() { + return ConfigurableClusterPrivilege.class; + } + + @Override + protected ConfigurableClusterPrivilege createTestInstance() { + return buildPrivileges(); + } + + @Override + protected ConfigurableClusterPrivilege mutateInstance(ConfigurableClusterPrivilege instance) throws IOException { + if (instance instanceof ManageRolesPrivilege) { + return buildPrivileges(MIN_INDEX_NAME_LENGTH - 1); + } + fail(); + return null; + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java new file mode 100644 index 0000000000000..728f068adcae4 --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.core.StringContains.containsString; + +public class ManageRolesPrivilegeIT extends SecurityInBasicRestTestCase { + + private TestSecurityClient adminSecurityClient; + private static final SecureString TEST_PASSWORD = new SecureString("100%-secure-password".toCharArray()); + + @Before + public void setupClient() { + adminSecurityClient = new TestSecurityClient(adminClient()); + } + + public void testManageRoles() throws Exception { + createManageRolesRole("manage-roles-role", new String[0], Set.of("*-allowed-suffix"), Set.of("read", "write")); + createUser("test-user", Set.of("manage-roles-role")); + + String authHeader = basicAuthHeaderValue("test-user", TEST_PASSWORD); + + createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges(Set.of("read", "write")).build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-suffix-not-allowed").privileges("write").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges("manage").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + } + + public void testManageSecurityNullifiesManageRoles() throws Exception { + createManageRolesRole("manage-roles-no-manage-security", new String[0], Set.of("allowed")); + createManageRolesRole("manage-roles-manage-security", new String[] { "manage_security" }, Set.of("allowed")); + + createUser("test-user-no-manage-security", Set.of("manage-roles-no-manage-security")); + createUser("test-user-manage-security", Set.of("manage-roles-manage-security")); + + String 
authHeaderNoManageSecurity = basicAuthHeaderValue("test-user-no-manage-security", TEST_PASSWORD); + String authHeaderManageSecurity = basicAuthHeaderValue("test-user-manage-security", TEST_PASSWORD); + + createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + // TODO Should the new global role/manage privilege be listed here? Probably not because it's not documented + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + + createRole( + authHeaderManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + } + + private void createRole(String authHeader, RoleDescriptor descriptor) throws IOException { + TestSecurityClient userAuthSecurityClient = new TestSecurityClient( + adminClient(), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader).build() + ); + userAuthSecurityClient.putRole(descriptor); + } + + private void createUser(String username, Set roles) throws IOException { + adminSecurityClient.putUser(new User(username, roles.toArray(String[]::new)), TEST_PASSWORD); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns) throws IOException { + createManageRolesRole(roleName, clusterPrivileges, indexPatterns, Set.of("read")); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns, Set privileges) + throws IOException { + adminSecurityClient.putRole( + new RoleDescriptor( + roleName, + clusterPrivileges, + new RoleDescriptor.IndicesPrivileges[0], + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns.toArray(String[]::new), + privileges.toArray(String[]::new) + ) + ) + ) }, + new String[0], + Map.of(), + Map.of() + ) + ); + } +} diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 5ae84517202d4..667140b849951 100644 --- 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.After; import org.junit.Before; @@ -385,6 +387,50 @@ public void testGrantApiKeyWithOnlyManageOwnApiKeyPrivilegeFails() throws IOExce assertThat(e.getMessage(), containsString("action [" + GrantApiKeyAction.NAME + "] is unauthorized for user")); } + public void testApiKeyWithManageRoles() throws IOException { + RoleDescriptor role = roleWithManageRoles("manage-roles-role", new String[] { "manage_own_api_key" }, "allowed-prefix*"); + getSecurityClient().putRole(role); + createUser("test-user", END_USER_PASSWORD, List.of("manage-roles-role")); + + final Request createApiKeyrequest = new Request("POST", "_security/api_key"); + createApiKeyrequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue("test-user", END_USER_PASSWORD)) + ); + final Map requestBody = Map.of( + "name", + "test-api-key", + "role_descriptors", + Map.of( + "test-role", + XContentTestUtils.convertToMap(roleWithManageRoles("test-role", new String[0], "allowed-prefix*")), + "another-test-role", + // This is not allowed by the limited-by-role (creator of the api key), so should not grant access to not-allowed=prefix* + XContentTestUtils.convertToMap(roleWithManageRoles("another-test-role", new String[0], "not-allowed-prefix*")) + ) + ); + + createApiKeyrequest.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString()); + Map responseMap = responseAsMap(client().performRequest(createApiKeyrequest)); + String encodedApiKey = responseMap.get("encoded").toString(); + + final Request createRoleRequest = new Request("POST", "_security/role/test-role"); + createRoleRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey)); + // Allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["allowed-prefix-test"],"privileges": ["read"]}]}"""); + assertOK(client().performRequest(createRoleRequest)); + } + // Not allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["not-allowed-prefix-test"],"privileges": ["read"]}]}"""); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createRoleRequest)); + assertEquals(403, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("this action is granted by the cluster privileges [manage_security,all]")); + } + } + public void testUpdateApiKey() throws IOException { final var apiKeyName = "my-api-key-name"; final Map apiKeyMetadata = Map.of("not", "returned"); @@ -2393,6 +2439,27 @@ private void createRole(String name, Collection localClusterPrivileges, getSecurityClient().putRole(role); } + private 
RoleDescriptor roleWithManageRoles(String name, String[] clusterPrivileges, String indexPattern) { + return new RoleDescriptor( + name, + clusterPrivileges, + null, + null, + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { indexPattern }, + new String[] { "read" } + ) + ) + ) }, + null, + null, + null + ); + } + protected void createRoleWithDescription(String name, Collection clusterPrivileges, String description) throws IOException { final RoleDescriptor role = new RoleDescriptor( name, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index d88577f905e96..90566e25b4ea5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -100,6 +100,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -137,6 +138,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; @@ -363,29 +365,10 @@ public void createApiKey( listener.onFailure(new IllegalArgumentException("authentication must be provided")); } else { final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) - && hasRemoteIndices(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
- listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote cluster privileges for API keys" - ) - ); + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && request.getType() == ApiKey.Type.CROSS_CLUSTER) { listener.onFailure( @@ -407,15 +390,63 @@ && hasRemoteIndices(request.getRoleDescriptors())) { return; } - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, transportVersion, request.getId() ); - createApiKeyAndIndexIt(authentication, request, filteredUserRoleDescriptors, listener); + createApiKeyAndIndexIt(authentication, request, filteredRoleDescriptors, listener); + } + } + + private Set filterRoleDescriptorsForMixedCluster( + final Set userRoleDescriptors, + final TransportVersion transportVersion, + final String... apiKeyIds + ) { + final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); + return maybeRemoveRemotePrivileges(userRolesWithoutDescription, transportVersion, apiKeyIds); + } + + private boolean validateRoleDescriptorsForMixedCluster( + final ActionListener listener, + final List roleDescriptors, + final TransportVersion transportVersion + ) { + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(roleDescriptors)) { + // API keys with roles which define remote indices privileges is not allowed in a mixed cluster. + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() + + "] or higher to support remote indices privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(roleDescriptors)) { + // API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
+ listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ROLE_REMOTE_CLUSTER_PRIVS + + "] or higher to support remote cluster privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ADD_MANAGE_ROLES_PRIVILEGE) && hasGlobalManageRolesPrivilege(roleDescriptors)) { + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ADD_MANAGE_ROLES_PRIVILEGE + + "] or higher to support the manage roles privilege for API keys" + ) + ); + return false; } + return true; } /** @@ -458,6 +489,13 @@ private static boolean hasRemoteCluster(Collection roleDescripto return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions); } + private static boolean hasGlobalManageRolesPrivilege(Collection roleDescriptors) { + return roleDescriptors != null + && roleDescriptors.stream() + .flatMap(roleDescriptor -> Arrays.stream(roleDescriptor.getConditionalClusterPrivileges())) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege); + } + private static IllegalArgumentException validateWorkflowsRestrictionConstraints( TransportVersion transportVersion, List requestRoleDescriptors, @@ -594,28 +632,11 @@ public void updateApiKeys( } final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
- listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote indices privileges for API keys" - ) - ); + + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + final Exception workflowsValidationException = validateWorkflowsRestrictionConstraints( transportVersion, request.getRoleDescriptors(), @@ -627,22 +648,22 @@ public void updateApiKeys( } final String[] apiKeyIds = request.getIds().toArray(String[]::new); - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, - transportVersion, - apiKeyIds - ); if (logger.isDebugEnabled()) { logger.debug("Updating [{}] API keys", buildDelimitedStringWithLimit(10, apiKeyIds)); } + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, + transportVersion, + apiKeyIds + ); + findVersionedApiKeyDocsForSubject( authentication, apiKeyIds, ActionListener.wrap( - versionedDocs -> updateApiKeys(authentication, request, filteredUserRoleDescriptors, versionedDocs, listener), + versionedDocs -> updateApiKeys(authentication, request, filteredRoleDescriptors, versionedDocs, listener), ex -> listener.onFailure(traceLog("bulk update", ex)) ) ); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index a2d2b21b489ea..9ddda193dba39 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; @@ -476,7 +477,15 @@ private Exception validateRoleDescriptor(RoleDescriptor role) { + TransportVersions.SECURITY_ROLE_DESCRIPTION.toReleaseVersion() + "] or higher to support specifying role description" ); - } + } else if (Arrays.stream(role.getConditionalClusterPrivileges()) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege) + && clusterService.state().getMinTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + return new IllegalStateException( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ); + } try { DLSRoleQueryValidator.validateQueryField(role.getIndicesPrivileges(), xContentRegistry); } catch (ElasticsearchException | IllegalArgumentException e) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 4c5ce703f48ad..9541dd9dc470d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_VERSION_STRING; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_MANAGE_ROLES_PRIVILEGE; /** * Responsible for handling system indices for the Security plugin @@ -409,6 +410,40 @@ private XContentBuilder getMainIndexMappings(SecurityMainIndexMappingVersion map builder.endObject(); } builder.endObject(); + if (mappingVersion.onOrAfter(ADD_MANAGE_ROLES_PRIVILEGE)) { + builder.startObject("role"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("manage"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("indices"); + { + builder.startObject("properties"); + { + builder.startObject("names"); + builder.field("type", "keyword"); + builder.endObject(); + builder.startObject("privileges"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } } builder.endObject(); } @@ -1050,6 +1085,11 @@ public enum SecurityMainIndexMappingVersion implements VersionId(Arrays.asList("", "\""))), - new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")) }, + new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")), + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { "test*" }, + new String[] { "read", "write" } + ) + ) + ) }, new String[] { "\"[a]/" }, Map.of(), Map.of() diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt index 7b5e24c97d65a..f913c8608960b 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt @@ -7,6 +7,6 @@ role_descriptor2 role_descriptor3 {"cluster":[],"indices":[],"applications":[{"application":"maps","privileges":["{","}","\n","\\","\""],"resources":["raster:*"]},{"application":"maps","privileges":["*:*"],"resources":["noooooo!!\n\n\f\\\\r","{"]}],"run_as":["jack","nich*","//\""],"metadata":{"some meta":42}} role_descriptor4 -{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{}},"indices":[{"names":["/. ? 
+ * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} +{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{},"role":{}},"indices":[{"names":["/. ? + * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} role_descriptor5 -{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} +{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}},"role":{"manage":{"indices":[{"names":["test*"],"privileges":["read","write"]}]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java index 4f4ff1d5743ee..650779cfbc85d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java @@ -29,6 +29,7 @@ import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomApplicationPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomIndicesPrivileges; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomManageRolesPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomRoleDescriptorMetadata; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -40,7 +41,7 @@ public class RolesBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private RestClient oldVersionClient = null; private RestClient newVersionClient = null; - public void testCreatingAndUpdatingRoles() throws Exception { + public void testRolesWithDescription() throws Exception { assumeTrue( "The role description is supported after transport version: " + TransportVersions.SECURITY_ROLE_DESCRIPTION, minimumTransportVersion().before(TransportVersions.SECURITY_ROLE_DESCRIPTION) @@ -48,14 +49,14 @@ public void testCreatingAndUpdatingRoles() throws Exception { switch (CLUSTER_TYPE) { case OLD -> { // Creating role in "old" cluster should succeed when description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-old-role", initialRole); - updateRole("my-old-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); // and fail if we include description var createException = expectThrows( Exception.class, - () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorSerialized(true)) + () -> createRole(client(), "my-invalid-old-role", 
randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( createException.getMessage(), @@ -65,7 +66,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { RestClient client = client(); var updateException = expectThrows( Exception.class, - () -> updateRole(client, "my-old-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( updateException.getMessage(), @@ -74,17 +75,20 @@ public void testCreatingAndUpdatingRoles() throws Exception { } case MIXED -> { try { - this.createClientsByVersion(); + this.createClientsByVersion(TransportVersions.SECURITY_ROLE_DESCRIPTION); // succeed when role description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-valid-mixed-role", initialRole); - updateRole("my-valid-mixed-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); // against old node, fail when description is provided either in update or create request { Exception e = expectThrows( Exception.class, - () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -94,7 +98,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -106,7 +110,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -120,7 +124,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -138,11 +142,129 @@ public void testCreatingAndUpdatingRoles() throws Exception { case UPGRADED -> { // on upgraded cluster which supports new description field // create/update requests should succeed either way (with or without description) - final String initialRole = randomRoleDescriptorSerialized(randomBoolean()); + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()); createRole(client(), "my-valid-upgraded-role", initialRole); updateRole( "my-valid-upgraded-role", - randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(randomBoolean())) + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()) + ) + ); + } + } + } + + public void testRolesWithManageRoles() throws Exception { + assumeTrue( + "The 
manage roles privilege is supported after transport version: " + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE, + minimumTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE) + ); + switch (CLUSTER_TYPE) { + case OLD -> { + // Creating role in "old" cluster should succeed when manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-old-role", initialRole); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); + + // and fail if we include manage roles + var createException = expectThrows( + Exception.class, + () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + createException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + + RestClient client = client(); + var updateException = expectThrows( + Exception.class, + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + updateException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + case MIXED -> { + try { + this.createClientsByVersion(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE); + // succeed when role manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-valid-mixed-role", initialRole); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); + + // against old node, fail when manage roles is provided either in update or create request + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + + // and against new node in a mixed cluster we should fail + { + Exception e = expectThrows( + Exception.class, + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + } finally { + this.closeClientsByVersion(); + } + } + case UPGRADED -> { + // on upgraded cluster which supports new description field + // create/update requests should succeed either way (with or without description) + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), 
randomRoleDescriptorWithManageRolesSerialized()); + createRole(client(), "my-valid-upgraded-role", initialRole); + updateRole( + "my-valid-upgraded-role", + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithManageRolesSerialized()) + ) ); } } @@ -166,10 +288,22 @@ private void updateRole(RestClient client, String roleName, String payload) thro assertThat(created, equalTo(false)); } - private static String randomRoleDescriptorSerialized(boolean includeDescription) { + private static String randomRoleDescriptorSerialized() { + return randomRoleDescriptorSerialized(false, false); + } + + private static String randomRoleDescriptorWithDescriptionSerialized() { + return randomRoleDescriptorSerialized(true, false); + } + + private static String randomRoleDescriptorWithManageRolesSerialized() { + return randomRoleDescriptorSerialized(false, true); + } + + private static String randomRoleDescriptorSerialized(boolean includeDescription, boolean includeManageRoles) { try { return XContentTestUtils.convertToXContent( - XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription)), + XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription, includeManageRoles)), XContentType.JSON ).utf8ToString(); } catch (IOException e) { @@ -177,26 +311,26 @@ private static String randomRoleDescriptorSerialized(boolean includeDescription) } } - private boolean nodeSupportRoleDescription(Map nodeDetails) { + private boolean nodeSupportTransportVersion(Map nodeDetails, TransportVersion transportVersion) { String nodeVersionString = (String) nodeDetails.get("version"); - TransportVersion transportVersion = getTransportVersionWithFallback( + TransportVersion nodeTransportVersion = getTransportVersionWithFallback( nodeVersionString, nodeDetails.get("transport_version"), () -> TransportVersions.ZERO ); - if (transportVersion.equals(TransportVersions.ZERO)) { + if (nodeTransportVersion.equals(TransportVersions.ZERO)) { // In cases where we were not able to find a TransportVersion, a pre-8.8.0 node answered about a newer (upgraded) node. // In that case, the node will be current (upgraded), and remote indices are supported for sure. 
var nodeIsCurrent = nodeVersionString.equals(Build.current().version()); assertTrue(nodeIsCurrent); return true; } - return transportVersion.onOrAfter(TransportVersions.SECURITY_ROLE_DESCRIPTION); + return nodeTransportVersion.onOrAfter(transportVersion); } - private void createClientsByVersion() throws IOException { - var clientsByCapability = getRestClientByCapability(); + private void createClientsByVersion(TransportVersion transportVersion) throws IOException { + var clientsByCapability = getRestClientByCapability(transportVersion); if (clientsByCapability.size() == 2) { for (Map.Entry client : clientsByCapability.entrySet()) { if (client.getKey() == false) { @@ -224,7 +358,7 @@ private void closeClientsByVersion() throws IOException { } @SuppressWarnings("unchecked") - private Map getRestClientByCapability() throws IOException { + private Map getRestClientByCapability(TransportVersion transportVersion) throws IOException { Response response = client().performRequest(new Request("GET", "_nodes")); assertOK(response); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -232,7 +366,7 @@ private Map getRestClientByCapability() throws IOException Map> hostsByCapability = new HashMap<>(); for (Map.Entry entry : nodesAsMap.entrySet()) { Map nodeDetails = (Map) entry.getValue(); - var capabilitySupported = nodeSupportRoleDescription(nodeDetails); + var capabilitySupported = nodeSupportTransportVersion(nodeDetails, transportVersion); Map httpInfo = (Map) nodeDetails.get("http"); hostsByCapability.computeIfAbsent(capabilitySupported, k -> new ArrayList<>()) .add(HttpHost.create((String) httpInfo.get("publish_address"))); @@ -244,7 +378,7 @@ private Map getRestClientByCapability() throws IOException return clientsByCapability; } - private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { + private static RoleDescriptor randomRoleDescriptor(boolean includeDescription, boolean includeManageRoles) { final Set excludedPrivileges = Set.of( "cross_cluster_replication", "cross_cluster_replication_internal", @@ -255,7 +389,7 @@ private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { randomSubsetOf(Set.of("all", "monitor", "none")).toArray(String[]::new), randomIndicesPrivileges(0, 3, excludedPrivileges), randomApplicationPrivileges(), - null, + includeManageRoles ? randomManageRolesPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(false), Map.of(), From f150e2c11df0fe3bef298c55bd867437e50f5f73 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 14:34:02 +0100 Subject: [PATCH 225/389] Add telemetry for repository usage (#112133) Adds to the `GET _cluster/stats` endpoint information about the snapshot repositories in use, including their types, whether they are read-only or read-write, and for Azure repositories the kind of credentials in use. 
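For illustration (counts invented; the field names and nesting follow the RepositoryUsageStats output added below), a cluster with two read-write S3 repositories and one read-only Azure repository authenticating with a SAS token would report something like: "repositories": { "s3": { "count": 2, "read_write": 2 }, "azure": { "count": 1, "read_only": 1, "uses_sas_token": 1 } }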
--- docs/changelog/112133.yaml | 5 ++ docs/reference/cluster/stats.asciidoc | 31 +++++++++- .../repositories/azure/AzureRepository.java | 6 ++ .../azure/AzureStorageService.java | 12 ++++ .../azure/AzureStorageSettings.java | 12 ++++ .../test/repository_azure/20_repository.yml | 13 ++++ .../test/repository_gcs/20_repository.yml | 13 ++++ .../20_repository_permanent_credentials.yml | 13 ++++ .../30_repository_temporary_credentials.yml | 13 ++++ .../40_repository_ec2_credentials.yml | 13 ++++ .../50_repository_ecs_credentials.yml | 13 ++++ .../60_repository_sts_credentials.yml | 13 ++++ server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 2 + .../stats/ClusterStatsNodeResponse.java | 36 ++++++----- .../cluster/stats/ClusterStatsResponse.java | 12 ++++ .../cluster/stats/RepositoryUsageStats.java | 59 +++++++++++++++++++ .../stats/TransportClusterStatsAction.java | 19 ++++-- .../cluster/health/ClusterHealthStatus.java | 2 +- .../repositories/RepositoriesFeatures.java | 23 ++++++++ .../repositories/RepositoriesService.java | 27 +++++++-- .../repositories/Repository.java | 8 +++ .../blobstore/BlobStoreRepository.java | 25 ++++++++ ...lasticsearch.features.FeatureSpecification | 1 + .../cluster/stats/VersionStatsTests.java | 3 +- .../ClusterStatsMonitoringDocTests.java | 25 ++++---- .../AzureRepositoryAnalysisRestIT.java | 37 ++++++++++++ 27 files changed, 400 insertions(+), 37 deletions(-) create mode 100644 docs/changelog/112133.yaml create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java create mode 100644 server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml new file mode 100644 index 0000000000000..11109402b7373 --- /dev/null +++ b/docs/changelog/112133.yaml @@ -0,0 +1,5 @@ +pr: 112133 +summary: Add telemetry for repository usage +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3b429ef427071..c39bc0dcd2878 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1282,6 +1282,31 @@ They are included here for expert users, but should otherwise be ignored. ===== +==== + +`repositories`:: +(object) Contains statistics about the <<snapshot-restore,snapshot>> repositories defined in the cluster, broken down +by repository type. ++ +.Properties of `repositories` +[%collapsible%open] +===== + +`count`::: +(integer) The number of repositories of this type in the cluster. + +`read_only`::: +(integer) The number of repositories of this type in the cluster which are registered read-only. + +`read_write`::: +(integer) The number of repositories of this type in the cluster which are not registered as read-only. + +Each repository type may also include other statistics about the repositories of that type here. + +===== + +==== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1579,6 +1604,9 @@ The API returns the following response: }, "snapshots": { ...
} } -------------------------------------------------- @@ -1589,6 +1617,7 @@ The API returns the following response: // TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/] // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] +// TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] @@ -1600,7 +1629,7 @@ The API returns the following response: // the plugins that will be in it. And because we figure folks don't need to // see an exhaustive list anyway. // 2. Similarly, ignore the contents of `network_types`, `discovery_types`, -// `packaging_types` and `snapshots`. +// `packaging_types`, `snapshots` and `repositories`. // 3. Ignore the contents of the (nodes) count object, as what's shown here // depends on the license. Voting-only nodes are e.g. only shown when this // test runs with a basic license. diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 388474acc75ea..c8c0b15db5ebe 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -26,6 +26,7 @@ import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.core.Strings.format; @@ -175,4 +176,9 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + protected Set getExtraUsageFeatures() { + return storageService.getExtraUsageFeatures(Repository.CLIENT_NAME.get(getMetadata().settings())); + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d6cd7bf3d246..09088004759a8 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -24,6 +24,7 @@ import java.net.Proxy; import java.net.URL; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; @@ -165,4 +166,15 @@ public void refreshSettings(Map clientsSettings) { this.storageSettings = Map.copyOf(clientsSettings); // clients are built lazily by {@link client(String, LocationMode)} } + + /** + * For Azure repositories, we report the different kinds of credentials in use in the telemetry. 
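+ * If the client settings cannot be resolved, this reports no features rather than throwing, since telemetry collection should never fail the stats request.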
+ */ + public Set getExtraUsageFeatures(String clientName) { + try { + return getClientSettings(clientName).credentialsUsageFeatures(); + } catch (Exception e) { + return Set.of(); + } + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index b3e8dd8898bea..2333a1fdb9e93 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Set; final class AzureStorageSettings { @@ -130,6 +131,7 @@ final class AzureStorageSettings { private final int maxRetries; private final Proxy proxy; private final boolean hasCredentials; + private final Set credentialsUsageFeatures; private AzureStorageSettings( String account, @@ -150,6 +152,12 @@ private AzureStorageSettings( this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; + this.credentialsUsageFeatures = Strings.hasText(key) ? Set.of("uses_key_credentials") + : Strings.hasText(sasToken) ? Set.of("uses_sas_token") + : SocketAccess.doPrivilegedException(() -> System.getenv("AZURE_FEDERATED_TOKEN_FILE")) == null + ? Set.of("uses_default_credentials", "uses_managed_identity") + : Set.of("uses_default_credentials", "uses_workload_identity"); + // Register the proxy if we have any // Validate proxy settings if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { @@ -366,4 +374,8 @@ private String deriveURIFromSettings(boolean isPrimary) { throw new IllegalArgumentException(e); } } + + public Set credentialsUsageFeatures() { + return credentialsUsageFeatures; + } } diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 299183f26d9dc..a4a7d0b22a0ed 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -235,6 +235,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.azure.count: 1 } + - gte: { repositories.azure.read_write: 1 } + --- teardown: diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index 68d61be4983c5..e8c34a4b6a20b 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -232,6 +232,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.gcs.count: 1 } + - gte: { repositories.gcs.read_write: 1 
} + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 77870697f93ae..e88a0861ec01c 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -345,6 +345,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 4a62d6183470d..501af980e17e3 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index e24ff1ad0e559..129f0ba5d7588 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 9c332cc7d9301..de334b4b3df96 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git 
a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml index 24c2b2b1741d6..09a8526017960 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml @@ -257,6 +257,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index c223db531e688..d412748ed4e57 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -429,6 +429,7 @@ org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, org.elasticsearch.indices.IndicesFeatures, + org.elasticsearch.repositories.RepositoriesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.ingest.IngestGeoIpFeatures, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 582c618216999..41fa34bb5a4a3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -199,6 +199,8 @@ static TransportVersion def(int id) { public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); + public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0); + /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index d74889b623589..b48295dc8b3eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -20,29 +20,33 @@ import org.elasticsearch.core.Nullable; import java.io.IOException; +import java.util.Objects; public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeInfo nodeInfo; private final NodeStats nodeStats; private final ShardStats[] shardsStats; - private ClusterHealthStatus clusterStatus; + private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; + private final RepositoryUsageStats repositoryUsageStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); - clusterStatus = null; - if (in.readBoolean()) { - clusterStatus = ClusterHealthStatus.readFrom(in); - } + this.clusterStatus = in.readOptionalWriteable(ClusterHealthStatus::readFrom); this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats = RepositoryUsageStats.readFrom(in); + } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; + } } public ClusterStatsNodeResponse( @@ -51,14 +55,16 @@ public ClusterStatsNodeResponse( NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats, - SearchUsageStats searchUsageStats + SearchUsageStats searchUsageStats, + RepositoryUsageStats repositoryUsageStats ) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; this.shardsStats = shardsStats; this.clusterStatus = clusterStatus; - this.searchUsageStats = searchUsageStats; + this.searchUsageStats = Objects.requireNonNull(searchUsageStats); + this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); } public NodeInfo nodeInfo() { @@ -85,20 +91,22 @@ public SearchUsageStats searchUsageStats() { return searchUsageStats; } + public RepositoryUsageStats repositoryUsageStats() { + return repositoryUsageStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (clusterStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(clusterStatus.value()); - } + out.writeOptionalWriteable(clusterStatus); nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } + if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats.writeTo(out); + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 
36e7b247befac..b6dd40e8c8b79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -30,6 +30,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResponse> + final RepositoryUsageStats repositoryUsageStats; + this.repositoryUsageStats = nodes.stream() + .map(ClusterStatsNodeResponse::repositoryUsageStats) + .filter(r -> r.isEmpty() == false) + // stats should be the same on every node so just pick one of them + .findAny() + .orElse(RepositoryUsageStats.EMPTY); } public String getClusterUUID() { @@ -113,6 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("snapshots"); clusterSnapshotStats.toXContent(builder, params); + builder.field("repositories"); + repositoryUsageStats.toXContent(builder, params); + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java new file mode 100644 index 0000000000000..771aa0fbef842 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +/** + * Stats on repository feature usage exposed in cluster stats for telemetry. + * + * @param statsByType a count of the repositories using various named features, keyed by repository type and then by feature name.
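+ * An empty map (see {@link #EMPTY}) indicates that no repositories are registered.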
+ */ +public record RepositoryUsageStats(Map<String, Map<String, Long>> statsByType) implements Writeable, ToXContentObject { + + public static final RepositoryUsageStats EMPTY = new RepositoryUsageStats(Map.of()); + + public static RepositoryUsageStats readFrom(StreamInput in) throws IOException { + final var statsByType = in.readMap(i -> i.readMap(StreamInput::readVLong)); + if (statsByType.isEmpty()) { + return EMPTY; + } else { + return new RepositoryUsageStats(statsByType); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(statsByType, (o, m) -> o.writeMap(m, StreamOutput::writeVLong)); + } + + public boolean isEmpty() { + return statsByType.isEmpty(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (Map.Entry<String, Map<String, Long>> typeAndStats : statsByType.entrySet()) { + builder.startObject(typeAndStats.getKey()); + for (Map.Entry<String, Long> statAndValue : typeAndStats.getValue().entrySet()) { + builder.field(statAndValue.getKey(), statAndValue.getValue()); + } + builder.endObject(); + } + return builder.endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index bcf49bca421f6..1912de3cfa4d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -41,6 +41,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeService; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -78,6 +79,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final NodeService nodeService; private final IndicesService indicesService; + private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; private final MetadataStatsCache<MappingStats> mappingStatsCache; @@ -90,6 +92,7 @@ public TransportClusterStatsAction( TransportService transportService, NodeService nodeService, IndicesService indicesService, + RepositoriesService repositoriesService, UsageService usageService, ActionFilters actionFilters ) { @@ -103,6 +106,7 @@ public TransportClusterStatsAction( ); this.nodeService = nodeService; this.indicesService = indicesService; + this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); @@ -237,12 +241,14 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq } } - ClusterHealthStatus clusterStatus = null; - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { - clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); - } + final ClusterState clusterState = clusterService.state(); + final ClusterHealthStatus clusterStatus = clusterState.nodes().isLocalNodeElectedMaster() + ?
new ClusterStateHealth(clusterState).getStatus() + : null; + + final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); - SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); + final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -250,7 +256,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]), - searchUsageStats + searchUsageStats, + repositoryUsageStats ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index d025ddab26af6..c53395b5d76c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,7 @@ public enum ClusterHealthStatus implements Writeable { YELLOW((byte) 1), RED((byte) 2); - private byte value; + private final byte value; ClusterHealthStatus(byte value) { this.value = value; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java new file mode 100644 index 0000000000000..141dac0c5c430 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +public class RepositoriesFeatures implements FeatureSpecification { + public static final NodeFeature SUPPORTS_REPOSITORIES_USAGE_STATS = new NodeFeature("repositories.supports_usage_stats"); + + @Override + public Set getFeatures() { + return Set.of(SUPPORTS_REPOSITORIES_USAGE_STATS); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index de4ae1051ba62..732a18dffe233 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.node.NodeClient; @@ -944,15 +945,33 @@ public List> getPreRestoreVersionChecks() { return preRestoreChecks; } - @Override - protected void doStart() { + public static String COUNT_USAGE_STATS_NAME = "count"; + public RepositoryUsageStats getUsageStats() { + if (repositories.isEmpty()) { + return RepositoryUsageStats.EMPTY; + } + final var statsByType = new HashMap>(); + for (final var repository : repositories.values()) { + final var repositoryType = repository.getMetadata().type(); + final var typeStats = statsByType.computeIfAbsent(repositoryType, ignored -> new HashMap<>()); + typeStats.compute(COUNT_USAGE_STATS_NAME, (k, count) -> (count == null ? 0L : count) + 1); + final var repositoryUsageTags = repository.getUsageFeatures(); + assert repositoryUsageTags.contains(COUNT_USAGE_STATS_NAME) == false : repositoryUsageTags; + for (final var repositoryUsageTag : repositoryUsageTags) { + typeStats.compute(repositoryUsageTag, (k, count) -> (count == null ? 0L : count) + 1); + } + } + return new RepositoryUsageStats( + statsByType.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue()))) + ); } @Override - protected void doStop() { + protected void doStart() {} - } + @Override + protected void doStop() {} @Override protected void doClose() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index fd52c21cad3f8..09f4782b6e5fa 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -312,6 +312,14 @@ void cloneShardSnapshot( */ void awaitIdle(); + /** + * @return a set of the names of the features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection. 
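+ * The default implementation reports no features; {@code BlobStoreRepository} overrides this to report read-only/read-write status plus any repository-specific extras.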
+ */ + default Set getUsageFeatures() { + return Set.of(); + } + static boolean assertSnapshotMetaThread() { return ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT_META); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e8af752bec179..cc56e940530e8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3943,4 +3943,29 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS ); } + + public static final String READ_ONLY_USAGE_STATS_NAME = "read_only"; + public static final String READ_WRITE_USAGE_STATS_NAME = "read_write"; + + @Override + public final Set getUsageFeatures() { + final var extraUsageFeatures = getExtraUsageFeatures(); + assert extraUsageFeatures.contains(READ_ONLY_USAGE_STATS_NAME) == false : extraUsageFeatures; + assert extraUsageFeatures.contains(READ_WRITE_USAGE_STATS_NAME) == false : extraUsageFeatures; + return Set.copyOf( + Stream.concat(Stream.of(isReadOnly() ? READ_ONLY_USAGE_STATS_NAME : READ_WRITE_USAGE_STATS_NAME), extraUsageFeatures.stream()) + .toList() + ); + } + + /** + * All blob-store repositories include the counts of read-only and read-write repositories in their telemetry. This method returns other + * features of the repositories in use. + * + * @return a set of the names of the extra features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection. + */ + protected Set getExtraUsageFeatures() { + return Set.of(); + } } diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index baf7e53345944..90a1c29972ff3 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -13,6 +13,7 @@ org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures +org.elasticsearch.repositories.RepositoriesFeatures org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.index.mapper.MapperFeatures org.elasticsearch.ingest.IngestGeoIpFeatures diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 49528c204b042..20eae9833e4b0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -127,7 +127,8 @@ public void testCreation() { null, null, new ShardStats[] { shardStats }, - null + new SearchUsageStats(), + RepositoryUsageStats.EMPTY ); stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 
c89638045a5a8..4a695f7c51e4c 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsNodeResponse; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.MappingStats; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.action.admin.cluster.stats.VersionStats; import org.elasticsearch.action.admin.indices.stats.CommonStats; @@ -420,6 +421,7 @@ public void testToXContent() throws IOException { when(mockNodeResponse.nodeStats()).thenReturn(mockNodeStats); when(mockNodeResponse.shardsStats()).thenReturn(new ShardStats[] { mockShardStats }); when(mockNodeResponse.searchUsageStats()).thenReturn(new SearchUsageStats()); + when(mockNodeResponse.repositoryUsageStats()).thenReturn(RepositoryUsageStats.EMPTY); final Metadata metadata = testClusterState.metadata(); final ClusterStatsResponse clusterStatsResponse = new ClusterStatsResponse( @@ -533,7 +535,9 @@ public void testToXContent() throws IOException { "fielddata": { "memory_size_in_bytes": 1, "evictions": 0, - "global_ordinals":{"build_time_in_millis":1} + "global_ordinals": { + "build_time_in_millis": 1 + } }, "query_cache": { "memory_size_in_bytes": 0, @@ -563,9 +567,9 @@ public void testToXContent() throws IOException { "file_sizes": {} }, "mappings": { - "total_field_count" : 0, - "total_deduplicated_field_count" : 0, - "total_deduplicated_mapping_size_in_bytes" : 0, + "total_field_count": 0, + "total_deduplicated_field_count": 0, + "total_deduplicated_mapping_size_in_bytes": 0, "field_types": [], "runtime_field_types": [] }, @@ -581,11 +585,11 @@ public void testToXContent() throws IOException { "synonyms": {} }, "versions": [], - "search" : { - "total" : 0, - "queries" : {}, - "rescorers" : {}, - "sections" : {} + "search": { + "total": 0, + "queries": {}, + "rescorers": {}, + "sections": {} }, "dense_vector": { "value_count": 0 @@ -749,7 +753,8 @@ public void testToXContent() throws IOException { "cleanups": 0 }, "repositories": {} - } + }, + "repositories": {} }, "cluster_state": { "nodes_hash": 1314980060, diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index ecc8401e1d79a..a9b8fe51c01cc 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -8,6 +8,8 @@ import fixture.azure.AzureHttpFixture; +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -15,15 +17,20 @@ import org.elasticsearch.test.TestTrustStore; import 
org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ObjectPath; +import org.hamcrest.Matcher; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.io.IOException; import java.util.Map; import java.util.function.Predicate; import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); @@ -119,4 +126,34 @@ protected Settings repositorySettings() { return Settings.builder().put("client", "repository_test_kit").put("container", container).put("base_path", basePath).build(); } + + public void testClusterStats() throws IOException { + registerRepository(randomIdentifier(), repositoryType(), true, repositorySettings()); + + final var request = new Request(HttpGet.METHOD_NAME, "/_cluster/stats"); + final var response = client().performRequest(request); + assertOK(response); + + final var objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("repositories.azure.count"), isSetIff(true)); + assertThat(objectPath.evaluate("repositories.azure.read_write"), isSetIff(true)); + + assertThat(objectPath.evaluate("repositories.azure.uses_key_credentials"), isSetIff(Strings.hasText(AZURE_TEST_KEY))); + assertThat(objectPath.evaluate("repositories.azure.uses_sas_token"), isSetIff(Strings.hasText(AZURE_TEST_SASTOKEN))); + assertThat( + objectPath.evaluate("repositories.azure.uses_default_credentials"), + isSetIff((Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY)) == false) + ); + assertThat( + objectPath.evaluate("repositories.azure.uses_managed_identity"), + isSetIff( + (Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_CLIENT_ID)) == false + ) + ); + assertThat(objectPath.evaluate("repositories.azure.uses_workload_identity"), isSetIff(Strings.hasText(AZURE_TEST_CLIENT_ID))); + } + + private static Matcher<Integer> isSetIff(boolean predicate) { + return predicate ?
equalTo(1) : nullValue(Integer.class); + } } From b7e1d5593b42f03aecc387160af6f452c4d25351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:45:53 +0200 Subject: [PATCH 226/389] Fix connection timeout for OpenIdConnectAuthenticator get Userinfo (#112230) * Fix connection timeout for OpenIdConnectAuthenticator get Userinfo * Update docs/changelog/112230.yaml --- docs/changelog/112230.yaml | 5 +++++ .../security/authc/oidc/OpenIdConnectAuthenticator.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112230.yaml diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml new file mode 100644 index 0000000000000..ef12dc3f78267 --- /dev/null +++ b/docs/changelog/112230.yaml @@ -0,0 +1,5 @@ +pr: 112230 +summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo +area: Security +type: bug +issues: [] diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 0f34850b861b7..c2e0caf7234cb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -718,7 +718,7 @@ private CloseableHttpAsyncClient createHttpClient() { connectionManager.setMaxTotal(realmConfig.getSetting(HTTP_MAX_CONNECTIONS)); final RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECT_TIMEOUT).getMillis())) - .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getSeconds())) + .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getMillis())) .setSocketTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_SOCKET_TIMEOUT).getMillis())) .build(); From b14bada16f3c66598e18393d8d30271a81096ec3 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 27 Aug 2024 10:44:29 -0400 Subject: [PATCH 227/389] [ML] Update inference interfaces for streaming (#112234) Using InferenceServiceResults and InferenceAction to stream ChunkedToXContent through to the Rest handler. --- .../inference/InferenceServiceResults.java | 24 ++++++++++++++++--- .../inference/action/InferenceAction.java | 20 ++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java index f8330404c1538..0000e0ddc9af9 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java @@ -13,17 +13,18 @@ import java.util.List; import java.util.Map; +import java.util.concurrent.Flow; public interface InferenceServiceResults extends NamedWriteable, ChunkedToXContent { /** - * Transform the result to match the format required for the TransportCoordinatedInferenceAction. + * <p>Transform the result to match the format required for the TransportCoordinatedInferenceAction.
* For the inference plugin TextEmbeddingResults, the {@link #transformToLegacyFormat()} transforms the * results into an intermediate format only used by the plugin's return value. It doesn't align with what the * TransportCoordinatedInferenceAction expects. TransportCoordinatedInferenceAction expects an ml plugin - * TextEmbeddingResults. + * TextEmbeddingResults.</p> * - * For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat. + * <p>For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat.</p>
*/ List<? extends InferenceResults> transformToCoordinationFormat(); @@ -37,4 +38,21 @@ public interface InferenceServiceResults extends NamedWriteable, ChunkedToXConte * Convert the result to a map to aid with test assertions */ Map<String, Object> asMap(); + + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Defaults to {@code false}. + */ + default boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the InferenceAction.Results will subscribe to this publisher. + * Implementations should follow the {@link java.util.concurrent.Flow.Publisher} spec to stream the chunks. + */ + default Flow.Publisher<ChunkedToXContent> publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index 7ecb5aef4ce8d..c38f508db1b6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.TimeValue; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Flow; import static org.elasticsearch.core.Strings.format; @@ -391,6 +393,24 @@ public InferenceServiceResults getResults() { return results; } + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Currently set to false while it is being implemented. + */ + public boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the RestHandler will subscribe to this publisher. + * When the RestResponse is finished with the current chunk, it will request the next chunk using the subscription. + * If the RestResponse is closed, it will cancel the subscription.
+ */ + public Flow.Publisher<ChunkedToXContent> publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } + @Override public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { From b43470feeb82d602f549b6dfee9243d9afa6ce25 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 27 Aug 2024 07:55:50 -0700 Subject: [PATCH 228/389] Fix nested field generation in StandardVersusLogsIndexModeRandomDataChallengeRestIT (#112223) --- .../logsdb/datageneration/fields/Context.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java index 647d5bff152d1..62130967508f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java @@ -13,6 +13,7 @@ import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; class Context { private final DataGeneratorSpecification specification; @@ -21,13 +22,14 @@ class Context { private final DataSourceResponse.FieldTypeGenerator fieldTypeGenerator; private final DataSourceResponse.ObjectArrayGenerator objectArrayGenerator; private final int objectDepth; - private final int nestedFieldsCount; + // We don't need atomicity, but we need to pass counter by reference to accumulate total value from sub-objects. + private final AtomicInteger nestedFieldsCount; Context(DataGeneratorSpecification specification) { - this(specification, 0, 0); + this(specification, 0, new AtomicInteger(0)); } - private Context(DataGeneratorSpecification specification, int objectDepth, int nestedFieldsCount) { + private Context(DataGeneratorSpecification specification, int objectDepth, AtomicInteger nestedFieldsCount) { this.specification = specification; this.childFieldGenerator = specification.dataSource().get(new DataSourceRequest.ChildFieldGenerator(specification)); this.fieldTypeGenerator = specification.dataSource().get(new DataSourceRequest.FieldTypeGenerator()); @@ -53,7 +55,8 @@ public Context subObject() { } public Context nestedObject() { - return new Context(specification, objectDepth + 1, nestedFieldsCount + 1); + nestedFieldsCount.incrementAndGet(); + return new Context(specification, objectDepth + 1, nestedFieldsCount); } public boolean shouldAddObjectField() { @@ -63,7 +66,7 @@ public boolean shouldAddNestedField() { return childFieldGenerator.generateNestedSubObject() && objectDepth < specification.maxObjectDepth() - && nestedFieldsCount < specification.nestedFieldsLimit(); + && nestedFieldsCount.get() < specification.nestedFieldsLimit(); } public Optional generateObjectArray() { From ed515138160da2b2431fd93462d3f3b7178e2e1b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 27 Aug 2024 10:57:17 -0400 Subject: [PATCH 229/389] ESQL: Remove `LogicalPlan` from old serialization (#112237) This removes `LogicalPlan` subclasses from `PlanNamedTypes` because it is no longer used.
--- .../xpack/esql/io/stream/PlanNamedTypes.java | 35 +-------- .../esql/io/stream/PlanNamedTypesTests.java | 52 ------------------- 2 files changed, 1 insertion(+), 86 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 180ba8c028e6a..77d982453203c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -23,24 +23,9 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.index.EsIndex; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -132,25 +117,7 @@ public static List<PlanNameRegistry.Entry> namedTypeEntries() { of(PhysicalPlan.class, ProjectExec.class, PlanNamedTypes::writeProjectExec, PlanNamedTypes::readProjectExec), of(PhysicalPlan.class, RowExec.class, PlanNamedTypes::writeRowExec, PlanNamedTypes::readRowExec), of(PhysicalPlan.class, ShowExec.class, PlanNamedTypes::writeShowExec, PlanNamedTypes::readShowExec), - of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec), - // Logical Plan Nodes - a subset of plans that end up being actually serialized - of(LogicalPlan.class, Aggregate.ENTRY), - of(LogicalPlan.class, Dissect.ENTRY), - of(LogicalPlan.class, EsRelation.ENTRY), - of(LogicalPlan.class, Eval.ENTRY), - of(LogicalPlan.class, Enrich.ENTRY), - of(LogicalPlan.class, EsqlProject.ENTRY), - of(LogicalPlan.class, Filter.ENTRY), - of(LogicalPlan.class, Grok.ENTRY), - of(LogicalPlan.class, InlineStats.ENTRY), - of(LogicalPlan.class, Join.ENTRY), - of(LogicalPlan.class, Limit.ENTRY), - of(LogicalPlan.class, LocalRelation.ENTRY), - of(LogicalPlan.class, Lookup.ENTRY), - of(LogicalPlan.class, MvExpand.ENTRY), - of(LogicalPlan.class, OrderBy.ENTRY), - of(LogicalPlan.class, Project.ENTRY), - of(LogicalPlan.class, TopN.ENTRY) + of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec) ); return declared; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index e5f195b053349..56ab1bd41693e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -38,24 +38,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -130,40 +112,6 @@ public void testPhysicalPlanEntries() { assertMap(actual, matchesList(expected)); } - // List of known serializable logical plan nodes - this should be kept up to date or retrieved - // programmatically. - public static final List<Class<? extends LogicalPlan>> LOGICAL_PLAN_NODE_CLS = List.of( - Aggregate.class, - Dissect.class, - Enrich.class, - EsRelation.class, - EsqlProject.class, - Eval.class, - Filter.class, - Grok.class, - InlineStats.class, - Join.class, - Limit.class, - LocalRelation.class, - Lookup.class, - MvExpand.class, - OrderBy.class, - Project.class, - TopN.class - ); - - // Tests that all logical plan nodes have a suitably named serialization entry. - public void testLogicalPlanEntries() { - var expected = LOGICAL_PLAN_NODE_CLS.stream().map(Class::getSimpleName).toList(); - var actual = PlanNamedTypes.namedTypeEntries() - .stream() - .filter(e -> e.categoryClass().isAssignableFrom(LogicalPlan.class)) - .map(PlanNameRegistry.Entry::name) - .sorted() - .toList(); - assertMap(actual, matchesList(expected)); - } - // Tests that all names are unique - there should be a good reason if this is not the case. public void testUniqueNames() { var actual = PlanNamedTypes.namedTypeEntries().stream().map(PlanNameRegistry.Entry::name).distinct().toList(); From bd2d6aa55fdf839ca42ebf04a6493732b6c94b24 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 27 Aug 2024 09:14:49 -0600 Subject: [PATCH 230/389] Fix template alias parsing livelock (#112217) * Fix template alias parsing livelock This commit fixes an issue with templates parsing alias definitions that can cause the ES thread to hang indefinitely.
Due to the malformed alias definition, the parsing gets into a loop which never exits. In this commit a null check in both the component template and alias parsing code is added, which prevents the looping. --- docs/changelog/112217.yaml | 5 +++++ .../cluster/metadata/AliasMetadata.java | 2 ++ .../cluster/metadata/Template.java | 6 +++++- .../metadata/ComponentTemplateTests.java | 19 +++++++++++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112217.yaml diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml new file mode 100644 index 0000000000000..bb367d6128001 --- /dev/null +++ b/docs/changelog/112217.yaml @@ -0,0 +1,5 @@ +pr: 112217 +summary: Fix template alias parsing livelock +area: Indices APIs +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java index a0f4a929dafdb..ff412d629b3b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java @@ -396,6 +396,8 @@ public static AliasMetadata fromXContent(XContentParser parser) throws IOExcepti } else if ("is_hidden".equals(currentFieldName)) { builder.isHidden(parser.booleanValue()); } + } else if (token == null) { + throw new IllegalArgumentException("unexpected null token while parsing alias"); } } return builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 70440adc4ebbe..b044ef6042428 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -70,7 +70,11 @@ public class Template implements SimpleDiffable